aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAmir Ayupov <aaupov@fb.com>2025-05-12 17:59:01 -0700
committerAmir Ayupov <aaupov@fb.com>2025-05-12 17:59:01 -0700
commit8fb71a821d84d6db5a8501dd634e3eab41ffde03 (patch)
treee846d5f5a8d902098bf6a612f8d635ff61c8d3fa
parent619e809047c8f00be86f481b39038f1803d48c62 (diff)
parentf2351d9e7f2e13883d15915ded79a0e931679fde (diff)
downloadllvm-users/aaupov/spr/main.boltnfc-fold-processpreaggregated-into-processbranchevents.zip
llvm-users/aaupov/spr/main.boltnfc-fold-processpreaggregated-into-processbranchevents.tar.gz
llvm-users/aaupov/spr/main.boltnfc-fold-processpreaggregated-into-processbranchevents.tar.bz2
Created using spr 1.3.4 [skip ci]
-rw-r--r--bolt/include/bolt/Core/BinaryFunction.h4
-rw-r--r--bolt/include/bolt/Profile/ProfileYAMLMapping.h4
-rw-r--r--bolt/lib/Core/BinaryFunctionProfile.cpp2
-rw-r--r--bolt/lib/Passes/MCF.cpp4
-rw-r--r--bolt/lib/Profile/DataAggregator.cpp8
-rw-r--r--bolt/lib/Profile/DataReader.cpp10
-rw-r--r--bolt/lib/Profile/YAMLProfileReader.cpp4
-rw-r--r--bolt/lib/Profile/YAMLProfileWriter.cpp2
-rw-r--r--bolt/lib/Rewrite/BuildIDRewriter.cpp3
-rw-r--r--bolt/tools/merge-fdata/merge-fdata.cpp4
-rw-r--r--clang-tools-extra/clangd/ClangdServer.cpp1
-rw-r--r--clang-tools-extra/clangd/CodeComplete.cpp12
-rw-r--r--clang-tools-extra/clangd/CodeComplete.h3
-rw-r--r--clang-tools-extra/clangd/CompileCommands.cpp3
-rw-r--r--clang-tools-extra/clangd/Config.h7
-rw-r--r--clang-tools-extra/clangd/ConfigCompile.cpp11
-rw-r--r--clang-tools-extra/clangd/ConfigFragment.h5
-rw-r--r--clang-tools-extra/clangd/ConfigYAML.cpp4
-rw-r--r--clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp34
-rw-r--r--clang-tools-extra/clangd/unittests/ConfigYAMLTests.cpp13
-rw-r--r--clang-tools-extra/modularize/Modularize.cpp7
-rw-r--r--clang-tools-extra/modularize/PreprocessorTracker.cpp15
-rw-r--r--clang/docs/ReleaseNotes.rst6
-rw-r--r--clang/include/clang/AST/Decl.h37
-rw-r--r--clang/include/clang/AST/DeclTemplate.h9
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h2
-rw-r--r--clang/include/clang/AST/TextNodeDumper.h1
-rw-r--r--clang/include/clang/Analysis/CFG.h10
-rw-r--r--clang/include/clang/Basic/Attr.td11
-rw-r--r--clang/include/clang/Basic/AttrDocs.td11
-rw-r--r--clang/include/clang/Basic/DeclNodes.td1
-rw-r--r--clang/include/clang/Basic/DiagnosticParseKinds.td3
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td2
-rw-r--r--clang/include/clang/Basic/SourceManager.h9
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIROps.td36
-rw-r--r--clang/include/clang/CIR/MissingFeatures.h1
-rw-r--r--clang/include/clang/Driver/Compilation.h10
-rw-r--r--clang/include/clang/Parse/Parser.h4
-rw-r--r--clang/include/clang/Sema/SemaHLSL.h1
-rw-r--r--clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h11
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def13
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h4
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h3
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h15
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h37
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h8
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h71
-rw-r--r--clang/lib/AST/CMakeLists.txt1
-rw-r--r--clang/lib/AST/Decl.cpp32
-rw-r--r--clang/lib/AST/DeclBase.cpp1
-rw-r--r--clang/lib/AST/DeclTemplate.cpp16
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp7
-rw-r--r--clang/lib/Analysis/CFG.cpp46
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExpr.cpp40
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenModule.cpp2
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenTypes.cpp13
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenValue.h27
-rw-r--r--clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp11
-rw-r--r--clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h10
-rw-r--r--clang/lib/CodeGen/CGBlocks.cpp3
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.cpp6
-rw-r--r--clang/lib/CodeGen/CGDecl.cpp1
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp3
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp6
-rw-r--r--clang/lib/Driver/Driver.cpp19
-rw-r--r--clang/lib/Driver/Job.cpp2
-rw-r--r--clang/lib/Driver/ToolChain.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/Darwin.cpp4
-rw-r--r--clang/lib/ExtractAPI/ExtractAPIConsumer.cpp3
-rw-r--r--clang/lib/Frontend/FrontendAction.cpp6
-rw-r--r--clang/lib/Frontend/PrintPreprocessedOutput.cpp3
-rw-r--r--clang/lib/Headers/cuda_wrappers/cmath50
-rw-r--r--clang/lib/Lex/PPDirectives.cpp8
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp88
-rw-r--r--clang/lib/Parse/ParseOpenACC.cpp5
-rw-r--r--clang/lib/Parse/ParseOpenMP.cpp160
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp3
-rw-r--r--clang/lib/Sema/SemaHLSL.cpp27
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp14
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp5
-rw-r--r--clang/lib/Serialization/ASTCommon.cpp1
-rw-r--r--clang/lib/Serialization/ModuleManager.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp78
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp55
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/Iterator.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/Iterator.h3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp93
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp26
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp26
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp43
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Core/CallEvent.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngine.cpp106
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp53
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp13
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp14
-rw-r--r--clang/lib/StaticAnalyzer/Core/LoopWidening.cpp29
-rw-r--r--clang/lib/StaticAnalyzer/Core/ProgramState.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Core/RegionStore.cpp53
-rw-r--r--clang/lib/StaticAnalyzer/Core/SValBuilder.cpp32
-rw-r--r--clang/lib/StaticAnalyzer/Core/SymbolManager.cpp2
-rw-r--r--clang/test/AST/HLSL/RootSignatures-AST.hlsl75
-rw-r--r--clang/test/Analysis/PR57270.cpp30
-rw-r--r--clang/test/Analysis/analyzer-config.c1
-rw-r--r--clang/test/Analysis/container-modeling.cpp4
-rw-r--r--clang/test/Analysis/dump_egraph.cpp1
-rw-r--r--clang/test/Analysis/explain-svals.cpp12
-rw-r--r--clang/test/Analysis/explain-svals.m4
-rw-r--r--clang/test/Analysis/loop-based-inlining-prevention.c200
-rw-r--r--clang/test/Analysis/loop-unrolling.cpp30
-rw-r--r--clang/test/CIR/CodeGen/basic.c26
-rw-r--r--clang/test/CIR/CodeGen/basic.cpp30
-rw-r--r--clang/test/CIR/CodeGen/vector-ext.cpp123
-rw-r--r--clang/test/CIR/CodeGen/vector.cpp123
-rw-r--r--clang/test/CIR/IR/vector.cir38
-rw-r--r--clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c2
-rw-r--r--clang/test/CodeGen/cfi-check-fail-debuginfo.c45
-rw-r--r--clang/test/CodeGen/cfi-icall-generalize-debuginfo.c126
-rw-r--r--clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c119
-rw-r--r--clang/test/Driver/no-integrated-cpp.c83
-rw-r--r--clang/test/Driver/print-supported-extensions-riscv.c1
-rw-r--r--clang/test/Driver/rewrite-objc-preproc.m5
-rw-r--r--clang/test/Modules/no-external-type-id.cppm2
-rw-r--r--clang/test/OpenMP/begin_declare_variant_executable_scope.c23
-rw-r--r--clang/test/OpenMP/begin_declare_variant_messages.c9
-rw-r--r--clang/test/OpenMP/cancel_messages.cpp5
-rw-r--r--clang/test/OpenMP/for_collapse_messages.cpp2
-rw-r--r--clang/test/OpenMP/for_ordered_clause.cpp3
-rw-r--r--clang/test/OpenMP/for_simd_collapse_messages.cpp2
-rw-r--r--clang/test/OpenMP/for_simd_loop_messages.cpp3
-rw-r--r--clang/test/OpenMP/masked_taskloop_collapse_messages.cpp2
-rw-r--r--clang/test/OpenMP/masked_taskloop_simd_collapse_messages.cpp2
-rw-r--r--clang/test/OpenMP/simd_collapse_messages.cpp11
-rw-r--r--clang/test/ParserOpenACC/parse-clauses.c8
-rw-r--r--clang/test/SemaHLSL/RootSignature-err.hlsl20
-rw-r--r--clang/tools/libclang/CIndex.cpp1
-rw-r--r--clang/unittests/Tooling/DependencyScanning/DependencyScanningFilesystemTest.cpp2
-rw-r--r--clang/utils/TableGen/ClangDiagnosticsEmitter.cpp10
-rw-r--r--clang/utils/TableGen/ClangOptionDocEmitter.cpp5
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h15
-rw-r--r--flang/include/flang/Evaluate/tools.h38
-rw-r--r--flang/include/flang/Evaluate/traverse.h3
-rw-r--r--flang/include/flang/Evaluate/variable.h41
-rw-r--r--flang/include/flang/Parser/parse-tree.h4
-rw-r--r--flang/include/flang/Parser/token-sequence.h2
-rw-r--r--flang/include/flang/Support/Fortran-features.h2
-rw-r--r--flang/lib/Evaluate/check-expression.cpp5
-rw-r--r--flang/lib/Evaluate/fold.cpp13
-rw-r--r--flang/lib/Evaluate/formatting.cpp26
-rw-r--r--flang/lib/Evaluate/intrinsics.cpp10
-rw-r--r--flang/lib/Evaluate/shape.cpp15
-rw-r--r--flang/lib/Evaluate/tools.cpp33
-rw-r--r--flang/lib/Evaluate/variable.cpp57
-rw-r--r--flang/lib/Lower/Bridge.cpp9
-rw-r--r--flang/lib/Lower/ConvertCall.cpp42
-rw-r--r--flang/lib/Lower/OpenACC.cpp24
-rw-r--r--flang/lib/Lower/OpenMP/ClauseProcessor.cpp20
-rw-r--r--flang/lib/Lower/OpenMP/ClauseProcessor.h6
-rw-r--r--flang/lib/Lower/OpenMP/Clauses.cpp2
-rw-r--r--flang/lib/Lower/OpenMP/OpenMP.cpp241
-rw-r--r--flang/lib/Lower/Support/Utils.cpp11
-rw-r--r--flang/lib/Parser/openmp-parsers.cpp5
-rw-r--r--flang/lib/Parser/parsing.cpp7
-rw-r--r--flang/lib/Parser/prescan.cpp196
-rw-r--r--flang/lib/Parser/prescan.h5
-rw-r--r--flang/lib/Parser/token-sequence.cpp6
-rw-r--r--flang/lib/Semantics/check-allocate.cpp10
-rw-r--r--flang/lib/Semantics/check-call.cpp45
-rw-r--r--flang/lib/Semantics/check-coarray.cpp29
-rw-r--r--flang/lib/Semantics/check-coarray.h3
-rw-r--r--flang/lib/Semantics/check-declarations.cpp27
-rw-r--r--flang/lib/Semantics/dump-expr.cpp1
-rw-r--r--flang/lib/Semantics/expression.cpp88
-rw-r--r--flang/lib/Semantics/mod-file.cpp3
-rw-r--r--flang/lib/Semantics/pointer-assignment.cpp15
-rw-r--r--flang/lib/Semantics/pointer-assignment.h2
-rw-r--r--flang/lib/Semantics/resolve-names.cpp4
-rw-r--r--flang/lib/Semantics/tools.cpp2
-rw-r--r--flang/test/Lower/CUDA/cuda-managed.cuf27
-rw-r--r--flang/test/Lower/HLFIR/call-postponed-associate.f9085
-rw-r--r--flang/test/Lower/HLFIR/entry_return.f908
-rw-r--r--flang/test/Lower/HLFIR/proc-pointer-comp-nopass.f902
-rw-r--r--flang/test/Lower/OpenACC/acc-atomic-capture.f9057
-rw-r--r--flang/test/Lower/OpenACC/acc-atomic-update.f9018
-rw-r--r--flang/test/Lower/OpenMP/Todo/defaultmap-clause-firstprivate.f9011
-rw-r--r--flang/test/Lower/OpenMP/Todo/defaultmap-clause-none.f9011
-rw-r--r--flang/test/Lower/OpenMP/Todo/defaultmap-clause.f908
-rw-r--r--flang/test/Lower/OpenMP/atomic-capture.f9056
-rw-r--r--flang/test/Lower/OpenMP/atomic-update.f9021
-rw-r--r--flang/test/Lower/OpenMP/defaultmap.f90105
-rw-r--r--flang/test/Parser/OpenMP/bug518.f4
-rw-r--r--flang/test/Parser/OpenMP/compiler-directive-continuation.f9012
-rw-r--r--flang/test/Parser/OpenMP/defaultmap-clause.f9016
-rw-r--r--flang/test/Parser/OpenMP/sentinels.f4
-rw-r--r--flang/test/Parser/continuation-in-conditional-compilation.f7
-rw-r--r--flang/test/Preprocessing/bug136845.F45
-rw-r--r--flang/test/Semantics/PowerPC/ppc-vector-types04.f906
-rw-r--r--flang/test/Semantics/allocate01.f904
-rw-r--r--flang/test/Semantics/atomic02.f902
-rw-r--r--flang/test/Semantics/atomic03.f904
-rw-r--r--flang/test/Semantics/atomic04.f904
-rw-r--r--flang/test/Semantics/atomic05.f902
-rw-r--r--flang/test/Semantics/atomic06.f902
-rw-r--r--flang/test/Semantics/atomic07.f902
-rw-r--r--flang/test/Semantics/atomic08.f902
-rw-r--r--flang/test/Semantics/atomic09.f902
-rw-r--r--flang/test/Semantics/atomic10.f904
-rw-r--r--flang/test/Semantics/atomic11.f902
-rw-r--r--flang/test/Semantics/bug138915.f9015
-rw-r--r--flang/test/Semantics/c_loc01.f904
-rw-r--r--flang/test/Semantics/call07.f904
-rw-r--r--flang/test/Semantics/call44.f9013
-rw-r--r--flang/test/Semantics/coarrays02.f9024
-rw-r--r--flang/test/Semantics/coshape.f904
-rw-r--r--flang/test/Semantics/error_stop1b.f902
-rw-r--r--flang/test/Semantics/event01b.f902
-rw-r--r--flang/test/Semantics/io11.f9049
-rw-r--r--flang/test/Semantics/misc-intrinsics.f9035
-rw-r--r--flang/test/Semantics/modfile75.F9017
-rw-r--r--flang/test/Semantics/resolve34.f9033
-rw-r--r--flang/test/Semantics/resolve94.f9012
-rw-r--r--libc/CMakeLists.txt1
-rw-r--r--libclc/CMakeLists.txt1
-rw-r--r--libclc/amdgpu/lib/SOURCES9
-rw-r--r--libclc/clc/include/clc/math/clc_cos.h19
-rw-r--r--libclc/clc/include/clc/math/clc_half_cos.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_divide.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_exp.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_exp10.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_exp2.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_log.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_log10.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_log2.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_powr.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_recip.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_rsqrt.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_sin.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_sqrt.h21
-rw-r--r--libclc/clc/include/clc/math/clc_half_tan.h21
-rw-r--r--libclc/clc/include/clc/math/clc_sin.h19
-rw-r--r--libclc/clc/include/clc/math/clc_sincos.h19
-rw-r--r--libclc/clc/include/clc/math/clc_sincos_helpers.h7
-rw-r--r--libclc/clc/include/clc/math/clc_sincos_helpers_fp64.inc17
-rw-r--r--libclc/clc/include/clc/math/clc_tan.h (renamed from libclc/generic/include/math/clc_tan.h)9
-rw-r--r--libclc/clc/include/clc/math/tables.h2
-rw-r--r--libclc/clc/lib/amdgpu/SOURCES9
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_exp.cl (renamed from libclc/amdgpu/lib/math/half_exp.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_exp10.cl (renamed from libclc/amdgpu/lib/math/half_exp10.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_exp2.cl15
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_log.cl (renamed from libclc/amdgpu/lib/math/half_log.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_log10.cl (renamed from libclc/amdgpu/lib/math/half_log10.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_log2.cl (renamed from libclc/amdgpu/lib/math/half_log2.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_native_unary.inc (renamed from libclc/amdgpu/lib/math/half_native_unary.inc)9
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_recip.cl (renamed from libclc/amdgpu/lib/math/half_recip.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_rsqrt.cl (renamed from libclc/amdgpu/lib/math/half_rsqrt.cl)6
-rw-r--r--libclc/clc/lib/amdgpu/math/clc_half_sqrt.cl (renamed from libclc/amdgpu/lib/math/half_sqrt.cl)6
-rw-r--r--libclc/clc/lib/generic/SOURCES18
-rw-r--r--libclc/clc/lib/generic/math/clc_cos.cl21
-rw-r--r--libclc/clc/lib/generic/math/clc_cos.inc63
-rw-r--r--libclc/clc/lib/generic/math/clc_half_cos.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_divide.cl (renamed from libclc/amdgpu/lib/math/half_exp2.cl)8
-rw-r--r--libclc/clc/lib/generic/math/clc_half_divide.inc12
-rw-r--r--libclc/clc/lib/generic/math/clc_half_exp.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_exp10.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_exp2.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_log.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_log10.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_log2.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_powr.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_recip.cl12
-rw-r--r--libclc/clc/lib/generic/math/clc_half_recip.inc11
-rw-r--r--libclc/clc/lib/generic/math/clc_half_rsqrt.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_sin.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_sqrt.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_half_tan.cl16
-rw-r--r--libclc/clc/lib/generic/math/clc_sin.cl25
-rw-r--r--libclc/clc/lib/generic/math/clc_sin.inc68
-rw-r--r--libclc/clc/lib/generic/math/clc_sincos.cl14
-rw-r--r--libclc/clc/lib/generic/math/clc_sincos.inc (renamed from libclc/generic/lib/math/sincos.inc)8
-rw-r--r--libclc/clc/lib/generic/math/clc_sincos_helpers.cl24
-rw-r--r--libclc/clc/lib/generic/math/clc_sincos_helpers.inc2
-rw-r--r--libclc/clc/lib/generic/math/clc_sincos_helpers_fp64.inc235
-rw-r--r--libclc/clc/lib/generic/math/clc_tables.cl60
-rw-r--r--libclc/clc/lib/generic/math/clc_tan.cl22
-rw-r--r--libclc/clc/lib/generic/math/clc_tan.inc61
-rw-r--r--libclc/clspv/lib/SOURCES3
-rw-r--r--libclc/generic/include/clc/math/sincos.h4
-rw-r--r--libclc/generic/include/clc/math/sincos.inc14
-rw-r--r--libclc/generic/lib/SOURCES3
-rw-r--r--libclc/generic/lib/math/clc_sw_unary.inc30
-rw-r--r--libclc/generic/lib/math/clc_tan.cl62
-rw-r--r--libclc/generic/lib/math/cos.cl42
-rw-r--r--libclc/generic/lib/math/cos.inc34
-rw-r--r--libclc/generic/lib/math/half_cos.cl6
-rw-r--r--libclc/generic/lib/math/half_divide.cl9
-rw-r--r--libclc/generic/lib/math/half_exp.cl6
-rw-r--r--libclc/generic/lib/math/half_exp10.cl6
-rw-r--r--libclc/generic/lib/math/half_exp2.cl6
-rw-r--r--libclc/generic/lib/math/half_log.cl6
-rw-r--r--libclc/generic/lib/math/half_log10.cl6
-rw-r--r--libclc/generic/lib/math/half_log2.cl6
-rw-r--r--libclc/generic/lib/math/half_powr.cl6
-rw-r--r--libclc/generic/lib/math/half_recip.cl10
-rw-r--r--libclc/generic/lib/math/half_rsqrt.cl6
-rw-r--r--libclc/generic/lib/math/half_sin.cl6
-rw-r--r--libclc/generic/lib/math/half_sqrt.cl6
-rw-r--r--libclc/generic/lib/math/half_tan.cl6
-rw-r--r--libclc/generic/lib/math/half_unary.inc17
-rw-r--r--libclc/generic/lib/math/sin.cl41
-rw-r--r--libclc/generic/lib/math/sin.inc38
-rw-r--r--libclc/generic/lib/math/sincos.cl4
-rw-r--r--libclc/generic/lib/math/sincos_helpers.cl285
-rw-r--r--libclc/generic/lib/math/sincos_helpers.h24
-rw-r--r--libclc/generic/lib/math/tables.cl30
-rw-r--r--libclc/generic/lib/math/tan.cl7
-rw-r--r--libclc/spirv/lib/SOURCES3
-rw-r--r--libcxx/CMakeLists.txt1
-rw-r--r--libcxx/docs/CodingGuidelines.rst12
-rw-r--r--libcxx/docs/Hardening.rst16
-rw-r--r--libcxx/docs/ImplementationDefinedBehavior.rst12
-rw-r--r--libcxx/docs/Modules.rst2
-rw-r--r--libcxx/docs/UserDocumentation.rst131
-rw-r--r--libcxx/include/__format/format_arg_store.h28
-rw-r--r--libcxx/include/__format/formatter_string.h6
-rw-r--r--libcxx/include/__utility/pair.h32
-rw-r--r--libcxx/include/locale2
-rw-r--r--libcxx/src/call_once.cpp1
-rw-r--r--libcxx/src/condition_variable.cpp6
-rw-r--r--libcxx/src/filesystem/directory_iterator.cpp1
-rw-r--r--libcxx/src/filesystem/error.h1
-rw-r--r--libcxx/src/filesystem/filesystem_clock.cpp2
-rw-r--r--libcxx/src/filesystem/filesystem_error.cpp1
-rw-r--r--libcxx/src/filesystem/operations.cpp1
-rw-r--r--libcxx/src/include/ryu/common.h1
-rw-r--r--libcxx/src/memory.cpp2
-rw-r--r--libcxx/src/mutex.cpp1
-rw-r--r--libcxx/src/random.cpp1
-rw-r--r--libcxx/src/ryu/d2fixed.cpp1
-rw-r--r--libcxx/src/ryu/d2s.cpp1
-rw-r--r--libcxx/src/ryu/f2s.cpp2
-rw-r--r--libcxx/src/thread.cpp2
-rw-r--r--libcxx/test/libcxx/containers/associative/map/scary.compile.pass.cpp (renamed from libcxx/test/std/containers/associative/multimap/scary.pass.cpp)13
-rw-r--r--libcxx/test/libcxx/containers/associative/set/scary.compile.pass.cpp (renamed from libcxx/test/std/containers/associative/multiset/scary.pass.cpp)13
-rw-r--r--libcxx/test/libcxx/containers/associative/unord.map/scary.compile.pass.cpp (renamed from libcxx/test/std/containers/unord/unord.multimap/scary.pass.cpp)15
-rw-r--r--libcxx/test/libcxx/containers/associative/unord.set/scary.compile.pass.cpp (renamed from libcxx/test/std/containers/unord/unord.multiset/scary.pass.cpp)15
-rw-r--r--libcxx/test/libcxx/containers/container.adaptors/flat.map/scary.compile.pass.cpp33
-rw-r--r--libcxx/test/libcxx/utilities/format/format.arguments/format.arg/assert.array.pass.cpp33
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp34
-rw-r--r--libcxx/test/std/utilities/format/format.functions/format_tests.h9
-rw-r--r--libcxx/test/std/utilities/utility/pairs/pair.astuple/pairs.by.type.pass.cpp137
-rw-r--r--libcxxabi/CMakeLists.txt1
-rw-r--r--libcxxabi/src/demangle/ItaniumDemangle.h2
-rw-r--r--libcxxabi/test/test_demangle.pass.cpp6
-rw-r--r--libunwind/CMakeLists.txt1
-rw-r--r--lldb/include/lldb/Symbol/CompilerType.h5
-rw-r--r--lldb/include/lldb/Symbol/TypeSystem.h6
-rw-r--r--lldb/source/Host/windows/PipeWindows.cpp10
-rw-r--r--lldb/source/Host/windows/ProcessLauncherWindows.cpp73
-rw-r--r--lldb/source/Plugins/ObjectFile/XCOFF/ObjectFileXCOFF.cpp50
-rw-r--r--lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp2
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp46
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h6
-rw-r--r--lldb/source/Symbol/CompilerType.cpp12
-rw-r--r--lldb/source/Target/RegisterContextUnwind.cpp21
-rw-r--r--lldb/source/ValueObject/ValueObject.cpp115
-rw-r--r--lldb/test/API/commands/frame/var-dil/basics/PointerArithmetic/TestFrameVarDILPointerArithmetic.py6
-rw-r--r--lldb/test/API/functionalities/completion/TestCompletion.py12
-rw-r--r--lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/optional/TestDataFormatterGenericOptional.py2
-rw-r--r--lldb/test/API/functionalities/unwind/frameless-faulted/Makefile13
-rw-r--r--lldb/test/API/functionalities/unwind/frameless-faulted/TestUnwindFramelessFaulted.py128
-rw-r--r--lldb/test/API/functionalities/unwind/frameless-faulted/interrupt-and-trap-funcs.s135
-rw-r--r--lldb/test/API/functionalities/unwind/frameless-faulted/main.c7
-rw-r--r--lldb/test/Shell/ObjectFile/XCOFF/basic-info.yaml85
-rw-r--r--lldb/tools/lldb-dap/CMakeLists.txt31
-rw-r--r--lldb/tools/lldb-dap/JSONUtils.cpp15
-rw-r--r--lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp10
-rw-r--r--lldb/tools/lldb-dap/Protocol/ProtocolTypes.h2
-rw-r--r--lldb/tools/lldb-dap/tool/CMakeLists.txt28
-rw-r--r--lldb/tools/lldb-dap/tool/lldb-dap-Info.plist.in (renamed from lldb/tools/lldb-dap/lldb-dap-Info.plist.in)0
-rw-r--r--lldb/tools/lldb-dap/tool/lldb-dap.cpp (renamed from lldb/tools/lldb-dap/lldb-dap.cpp)0
-rw-r--r--lldb/tools/lldb-server/lldb-platform.cpp2
-rw-r--r--lldb/unittests/CMakeLists.txt3
-rw-r--r--lldb/unittests/DAP/CMakeLists.txt11
-rw-r--r--lldb/unittests/DAP/JSONUtilsTest.cpp195
-rw-r--r--lldb/unittests/DAP/LLDBUtilsTest.cpp65
-rw-r--r--lldb/unittests/DAP/ProtocolTypesTest.cpp62
-rw-r--r--lldb/unittests/Host/HostTest.cpp2
-rw-r--r--llvm/docs/LangRef.rst12
-rw-r--r--llvm/docs/RISCVUsage.rst7
-rw-r--r--llvm/docs/ReleaseNotes.md1
-rw-r--r--llvm/include/llvm/ADT/APInt.h3
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfo.h2
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfoImpl.h5
-rw-r--r--llvm/include/llvm/Analysis/VecFuncs.def2
-rw-r--r--llvm/include/llvm/DebugInfo/BTF/BTF.h6
-rw-r--r--llvm/include/llvm/DebugInfo/DIContext.h2
-rw-r--r--llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h7
-rw-r--r--llvm/include/llvm/DebugInfo/GSYM/GsymDIContext.h66
-rw-r--r--llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h3
-rw-r--r--llvm/include/llvm/Demangle/ItaniumDemangle.h2
-rw-r--r--llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h3
-rw-r--r--llvm/include/llvm/IR/Constants.h2
-rw-r--r--llvm/include/llvm/IR/DIBuilder.h13
-rw-r--r--llvm/include/llvm/IR/DataLayout.h12
-rw-r--r--llvm/include/llvm/IR/InlineAsm.h2
-rw-r--r--llvm/include/llvm/MC/MCContext.h17
-rw-r--r--llvm/include/llvm/Support/AArch64AttributeParser.h3
-rw-r--r--llvm/include/llvm/Support/AMDGPUMetadata.h5
-rw-r--r--llvm/include/llvm/Support/ARMAttributeParser.h3
-rw-r--r--llvm/include/llvm/Support/ARMBuildAttributes.h3
-rw-r--r--llvm/include/llvm/Support/ARMWinEH.h5
-rw-r--r--llvm/include/llvm/Support/Allocator.h5
-rw-r--r--llvm/include/llvm/Support/BalancedPartitioning.h13
-rw-r--r--llvm/include/llvm/Support/Base64.h4
-rw-r--r--llvm/include/llvm/Support/BinaryStreamError.h3
-rw-r--r--llvm/include/llvm/Support/BinaryStreamReader.h38
-rw-r--r--llvm/include/llvm/Support/BinaryStreamRef.h37
-rw-r--r--llvm/include/llvm/Support/BinaryStreamWriter.h28
-rw-r--r--llvm/include/llvm/Support/BlockFrequency.h15
-rw-r--r--llvm/include/llvm/Support/BranchProbability.h15
-rw-r--r--llvm/include/llvm/Support/BuryPointer.h3
-rw-r--r--llvm/include/llvm/Support/COM.h8
-rw-r--r--llvm/include/llvm/Support/CRC.h7
-rw-r--r--llvm/include/llvm/Support/CSKYAttributeParser.h3
-rw-r--r--llvm/include/llvm/Support/CSKYAttributes.h3
-rw-r--r--llvm/include/llvm/Support/CachePruning.h9
-rw-r--r--llvm/include/llvm/Support/Caching.h3
-rw-r--r--llvm/include/llvm/Support/Chrono.h36
-rw-r--r--llvm/include/llvm/Support/CommandLine.h187
-rw-r--r--llvm/include/llvm/Support/Compression.h55
-rw-r--r--llvm/include/llvm/Support/ConvertEBCDIC.h7
-rw-r--r--llvm/include/llvm/Support/ConvertUTF.h110
-rw-r--r--llvm/include/llvm/Support/CrashRecoveryContext.h34
-rw-r--r--llvm/include/llvm/Support/DJB.h3
-rw-r--r--llvm/include/llvm/Support/DataExtractor.h50
-rw-r--r--llvm/include/llvm/Support/Debug.h8
-rw-r--r--llvm/include/llvm/Support/DebugCounter.h17
-rw-r--r--llvm/include/llvm/Support/DivisionByConstantInfo.h5
-rw-r--r--llvm/include/llvm/Support/DynamicLibrary.h25
-rw-r--r--llvm/include/llvm/Support/ELFAttrParserCompact.h3
-rw-r--r--llvm/include/llvm/Support/ELFAttrParserExtended.h3
-rw-r--r--llvm/include/llvm/Support/ELFAttributes.h8
-rw-r--r--llvm/include/llvm/Support/Errno.h5
-rw-r--r--llvm/include/llvm/Support/Error.h40
-rw-r--r--llvm/include/llvm/Support/ErrorHandling.h48
-rw-r--r--llvm/include/llvm/Support/ExponentialBackoff.h3
-rw-r--r--llvm/include/llvm/Support/ExtensibleRTTI.h4
-rw-r--r--llvm/include/llvm/Support/FileCollector.h7
-rw-r--r--llvm/include/llvm/Support/FileOutputBuffer.h3
-rw-r--r--llvm/include/llvm/Support/FileSystem.h298
-rw-r--r--llvm/include/llvm/Support/FileUtilities.h11
-rw-r--r--llvm/include/llvm/Support/Format.h3
-rw-r--r--llvm/include/llvm/Support/FormatVariadic.h3
-rw-r--r--llvm/include/llvm/Support/FormatVariadicDetails.h5
-rw-r--r--llvm/include/llvm/Support/FormattedStream.h9
-rw-r--r--llvm/include/llvm/Support/GlobPattern.h9
-rw-r--r--llvm/include/llvm/Support/GraphWriter.h11
-rw-r--r--llvm/include/llvm/Support/HexagonAttributeParser.h3
-rw-r--r--llvm/include/llvm/Support/HexagonAttributes.h3
-rw-r--r--llvm/include/llvm/Support/InitLLVM.h7
-rw-r--r--llvm/include/llvm/Support/InstructionCost.h8
-rw-r--r--llvm/include/llvm/Support/JSON.h92
-rw-r--r--llvm/include/llvm/Support/KnownBits.h136
-rw-r--r--llvm/include/llvm/Support/LEB128.h5
-rw-r--r--llvm/include/llvm/Support/LineIterator.h13
-rw-r--r--llvm/include/llvm/Support/Locale.h7
-rw-r--r--llvm/include/llvm/Support/LockFileManager.h3
-rw-r--r--llvm/include/llvm/Support/MD5.h22
-rw-r--r--llvm/include/llvm/Support/MSP430AttributeParser.h3
-rw-r--r--llvm/include/llvm/Support/MSP430Attributes.h3
-rw-r--r--llvm/include/llvm/Support/ManagedStatic.h8
-rw-r--r--llvm/include/llvm/Support/MathExtras.h2
-rw-r--r--llvm/include/llvm/Support/MemAlloc.h4
-rw-r--r--llvm/include/llvm/Support/Memory.h17
-rw-r--r--llvm/include/llvm/Support/MemoryBuffer.h15
-rw-r--r--llvm/include/llvm/Support/MemoryBufferRef.h3
-rw-r--r--llvm/include/llvm/Support/ModRef.h9
-rw-r--r--llvm/include/llvm/Support/Mustache.h19
-rw-r--r--llvm/include/llvm/Support/NativeFormatting.h42
-rw-r--r--llvm/include/llvm/Support/OptimizedStructLayout.h7
-rw-r--r--llvm/include/llvm/Support/PGOOptions.h25
-rw-r--r--llvm/include/llvm/Support/Parallel.h18
-rw-r--r--llvm/include/llvm/Support/Path.h148
-rw-r--r--llvm/include/llvm/Support/PluginLoader.h8
-rw-r--r--llvm/include/llvm/Support/PrettyStackTrace.h24
-rw-r--r--llvm/include/llvm/Support/Process.h61
-rw-r--r--llvm/include/llvm/Support/Program.h76
-rw-r--r--llvm/include/llvm/Support/RISCVAttributeParser.h3
-rw-r--r--llvm/include/llvm/Support/RISCVAttributes.h3
-rw-r--r--llvm/include/llvm/Support/RISCVISAUtils.h3
-rw-r--r--llvm/include/llvm/Support/RandomNumberGenerator.h4
-rw-r--r--llvm/include/llvm/Support/Recycler.h4
-rw-r--r--llvm/include/llvm/Support/Regex.h28
-rw-r--r--llvm/include/llvm/Support/SHA1.h13
-rw-r--r--llvm/include/llvm/Support/SHA256.h13
-rw-r--r--llvm/include/llvm/Support/SMTAPI.h11
-rw-r--r--llvm/include/llvm/Support/ScaledNumber.h21
-rw-r--r--llvm/include/llvm/Support/ScopedPrinter.h11
-rw-r--r--llvm/include/llvm/Support/Signals.h34
-rw-r--r--llvm/include/llvm/Support/Signposts.h11
-rw-r--r--llvm/include/llvm/Support/SipHash.h11
-rw-r--r--llvm/include/llvm/Support/SmallVectorMemoryBuffer.h3
-rw-r--r--llvm/include/llvm/Support/SourceMgr.h73
-rw-r--r--llvm/include/llvm/Support/SpecialCaseList.h42
-rw-r--r--llvm/include/llvm/Support/StringSaver.h9
-rw-r--r--llvm/include/llvm/Support/SuffixTree.h7
-rw-r--r--llvm/include/llvm/Support/SuffixTreeNode.h21
-rw-r--r--llvm/include/llvm/Support/SystemUtils.h4
-rw-r--r--llvm/include/llvm/Support/TarWriter.h7
-rw-r--r--llvm/include/llvm/Support/TargetSelect.h23
-rw-r--r--llvm/include/llvm/Support/ThreadPool.h7
-rw-r--r--llvm/include/llvm/Support/Threading.h25
-rw-r--r--llvm/include/llvm/Support/TimeProfiler.h42
-rw-r--r--llvm/include/llvm/Support/Timer.h56
-rw-r--r--llvm/include/llvm/Support/ToolOutputFile.h11
-rw-r--r--llvm/include/llvm/Support/TypeSize.h5
-rw-r--r--llvm/include/llvm/Support/Unicode.h16
-rw-r--r--llvm/include/llvm/Support/Valgrind.h9
-rw-r--r--llvm/include/llvm/Support/VersionTuple.h9
-rw-r--r--llvm/include/llvm/Support/VirtualFileSystem.h89
-rw-r--r--llvm/include/llvm/Support/Windows/WindowsSupport.h18
-rw-r--r--llvm/include/llvm/Support/WindowsError.h5
-rw-r--r--llvm/include/llvm/Support/WithColor.h48
-rw-r--r--llvm/include/llvm/Support/YAMLParser.h57
-rw-r--r--llvm/include/llvm/Support/YAMLTraits.h80
-rw-r--r--llvm/include/llvm/Support/raw_os_ostream.h3
-rw-r--r--llvm/include/llvm/Support/raw_ostream.h37
-rw-r--r--llvm/include/llvm/Support/raw_socket_stream.h17
-rw-r--r--llvm/include/llvm/Support/thread.h13
-rw-r--r--llvm/include/llvm/Support/xxhash.h9
-rw-r--r--llvm/include/llvm/TableGen/Record.h26
-rw-r--r--llvm/lib/Analysis/DXILResource.cpp4
-rw-r--r--llvm/lib/Analysis/LoopAccessAnalysis.cpp5
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp60
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp8
-rw-r--r--llvm/lib/Analysis/TargetTransformInfo.cpp5
-rw-r--r--llvm/lib/Bitcode/Reader/BitcodeReader.cpp4
-rw-r--r--llvm/lib/Bitcode/Writer/BitcodeWriter.cpp8
-rw-r--r--llvm/lib/Bitcode/Writer/ValueEnumerator.cpp4
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp9
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp13
-rw-r--r--llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp2
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/FastISel.cpp2
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp2
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp7
-rw-r--r--llvm/lib/DebugInfo/GSYM/CMakeLists.txt1
-rw-r--r--llvm/lib/DebugInfo/GSYM/GsymDIContext.cpp166
-rw-r--r--llvm/lib/DebugInfo/Symbolize/CMakeLists.txt1
-rw-r--r--llvm/lib/DebugInfo/Symbolize/Symbolize.cpp94
-rw-r--r--llvm/lib/Debuginfod/Debuginfod.cpp3
-rw-r--r--llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp16
-rw-r--r--llvm/lib/FuzzMutate/IRMutator.cpp4
-rw-r--r--llvm/lib/IR/AttributeImpl.h27
-rw-r--r--llvm/lib/IR/Attributes.cpp4
-rw-r--r--llvm/lib/IR/Core.cpp6
-rw-r--r--llvm/lib/IR/DIBuilder.cpp13
-rw-r--r--llvm/lib/IR/Verifier.cpp1
-rw-r--r--llvm/lib/ObjCopy/Archive.cpp15
-rw-r--r--llvm/lib/ObjCopy/CommonConfig.cpp7
-rw-r--r--llvm/lib/ObjCopy/ConfigManager.cpp7
-rw-r--r--llvm/lib/ObjCopy/ObjCopy.cpp11
-rw-r--r--llvm/lib/Support/APFloat.cpp10
-rw-r--r--llvm/lib/Support/Chrono.cpp13
-rw-r--r--llvm/lib/Support/CommandLine.cpp27
-rw-r--r--llvm/lib/Support/TrieRawHashMap.cpp4
-rw-r--r--llvm/lib/TableGen/Record.cpp28
-rw-r--r--llvm/lib/TableGen/SetTheory.cpp2
-rw-r--r--llvm/lib/TableGen/TGParser.cpp7
-rw-r--r--llvm/lib/TableGen/TGParser.h2
-rw-r--r--llvm/lib/Target/AArch64/AArch64FrameLowering.cpp7
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp37
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td15
-rw-r--r--llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp17
-rw-r--r--llvm/lib/Target/AArch64/AArch64RegisterInfo.h2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td20
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h5
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPU.h14
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp21
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp109
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp256
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp358
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp8
-rw-r--r--llvm/lib/Target/AMDGPU/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/ARM/ARMAsmPrinter.cpp5
-rw-r--r--llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp3
-rw-r--r--llvm/lib/Target/ARM/ARMTargetTransformInfo.h3
-rw-r--r--llvm/lib/Target/AVR/AVRISelLowering.cpp34
-rw-r--r--llvm/lib/Target/AVR/AVRISelLowering.h60
-rw-r--r--llvm/lib/Target/AVR/AVRInstrInfo.td55
-rw-r--r--llvm/lib/Target/AVR/AVRSelectionDAGInfo.cpp (renamed from libclc/generic/lib/math/half_binary.inc)14
-rw-r--r--llvm/lib/Target/AVR/AVRSelectionDAGInfo.h8
-rw-r--r--llvm/lib/Target/AVR/CMakeLists.txt2
-rw-r--r--llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp3
-rw-r--r--llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp6
-rw-r--r--llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp292
-rw-r--r--llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp133
-rw-r--r--llvm/lib/Target/Hexagon/HexagonInstrInfo.h7
-rw-r--r--llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp5
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td8
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp83
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td86
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZb.td132
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrPredicates.td4
-rw-r--r--llvm/lib/Target/RISCV/RISCVScheduleV.td4
-rw-r--r--llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp4
-rw-r--r--llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp4
-rw-r--r--llvm/lib/Target/Sparc/Sparc.td16
-rw-r--r--llvm/lib/Target/Sparc/SparcInstrAliases.td4
-rw-r--r--llvm/lib/Target/Sparc/SparcInstrFormats.td17
-rw-r--r--llvm/lib/Target/Sparc/SparcInstrInfo.td13
-rw-r--r--llvm/lib/Target/Sparc/SparcInstrUAOSA.td47
-rw-r--r--llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp11
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp13
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp8
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.h34
-rw-r--r--llvm/lib/Target/X86/X86RegisterInfo.cpp22
-rw-r--r--llvm/lib/Target/X86/X86RegisterInfo.h2
-rw-r--r--llvm/lib/Target/X86/X86SuppressAPXForReloc.cpp31
-rw-r--r--llvm/lib/TargetParser/ARMTargetParserCommon.cpp6
-rw-r--r--llvm/lib/Transforms/IPO/AttributorAttributes.cpp4
-rw-r--r--llvm/lib/Transforms/IPO/LowerTypeTests.cpp12
-rw-r--r--llvm/lib/Transforms/InstCombine/InstructionCombining.cpp21
-rw-r--r--llvm/lib/Transforms/Scalar/LICM.cpp15
-rw-r--r--llvm/lib/Transforms/Scalar/LoopDistribute.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp59
-rw-r--r--llvm/lib/Transforms/Utils/BasicBlockUtils.cpp4
-rw-r--r--llvm/lib/Transforms/Utils/Local.cpp11
-rw-r--r--llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp7
-rw-r--r--llvm/lib/Transforms/Utils/SimplifyCFG.cpp12
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp121
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp426
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp33
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp48
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp81
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.h20
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll57
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-ld1.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-neon-copy.ll262
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/ctlz.ll11
-rw-r--r--llvm/test/CodeGen/AArch64/ctpop.ll11
-rw-r--r--llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll9
-rw-r--r--llvm/test/CodeGen/AArch64/load.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll43
-rw-r--r--llvm/test/CodeGen/AArch64/reserveXreg.ll342
-rw-r--r--llvm/test/CodeGen/AArch64/sadd_sat_vec.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/setcc-fsh.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/shufflevector.ll13
-rw-r--r--llvm/test/CodeGen/AArch64/ssub_sat_vec.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll25
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll13
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll43
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll44
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll178
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll20
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll30
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/sve-insert-vector.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-vscale-attr.ll20
-rw-r--r--llvm/test/CodeGen/AArch64/sve2-bsl.ll206
-rw-r--r--llvm/test/CodeGen/AArch64/uadd_sat_vec.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/usub_sat_vec.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.atomic.fadd-with-ret.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll3697
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll4017
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll4343
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll4663
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll4989
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll5309
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll5635
-rw-r--r--llvm/test/CodeGen/AMDGPU/ashr.v2i16.ll719
-rw-r--r--llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll3525
-rw-r--r--llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll3403
-rw-r--r--llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll3403
-rw-r--r--llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll33
-rw-r--r--llvm/test/CodeGen/AMDGPU/combine_vloads.ll120
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll7079
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll7911
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll7911
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll7737
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll1079
-rw-r--r--llvm/test/CodeGen/AMDGPU/gfx11-twoaddr-fma.mir4
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll7336
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll7882
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll7882
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll7708
-rw-r--r--llvm/test/CodeGen/AMDGPU/llc-pipeline.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll185
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll3496
-rw-r--r--llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll4150
-rw-r--r--llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll4150
-rw-r--r--llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll4102
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-IR-lowering.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-debug-info.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs-IR-lowering.ll44
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll263
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs.ll28
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-vgpr-to-sgpr-return.ll28
-rw-r--r--llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll542
-rw-r--r--llvm/test/CodeGen/AMDGPU/wwm-reserved.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/attributes.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/features-info.ll1
-rw-r--r--llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rv32zba.ll274
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zba.ll1027
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir30
-rw-r--r--llvm/test/CodeGen/X86/apx/reloc-opt.ll269
-rw-r--r--llvm/test/CodeGen/X86/apx/reloc.mir30
-rw-r--r--llvm/test/CodeGen/X86/atomic-load-store.ll19
-rw-r--r--llvm/test/CodeGen/X86/avx10_2-cmp.ll21
-rw-r--r--llvm/test/CodeGen/X86/movtopush.ll1
-rw-r--r--llvm/test/CodeGen/X86/musttail-tailcc.ll68
-rw-r--r--llvm/test/CodeGen/X86/tailcall-tailcc.ll108
-rw-r--r--llvm/test/CodeGen/X86/tailcc-fastcc.ll26
-rw-r--r--llvm/test/CodeGen/X86/win64_eh.ll1
-rw-r--r--llvm/test/CodeGen/X86/win64_frame.ll2
-rw-r--r--llvm/test/CodeGen/X86/win_chkstk.ll2
-rw-r--r--llvm/test/DebugInfo/Generic/debug-names-asm-label.ll54
-rw-r--r--llvm/test/DebugInfo/Generic/multi-variant.ll74
-rw-r--r--llvm/test/MC/Disassembler/Sparc/sparc-ua-osa.txt31
-rw-r--r--llvm/test/MC/RISCV/xandesvpackfph-valid.s39
-rw-r--r--llvm/test/MC/Sparc/sparc-ua2005.s17
-rw-r--r--llvm/test/MC/Sparc/sparc-ua2007.s30
-rw-r--r--llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll294
-rw-r--r--llvm/test/Transforms/InstCombine/AMDGPU/fmed3-fpext-fold.ll642
-rw-r--r--llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll1349
-rw-r--r--llvm/test/Transforms/InstCombine/getelementptr.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll36
-rw-r--r--llvm/test/Transforms/LICM/salvage-hoisted-add.ll70
-rw-r--r--llvm/test/Transforms/LICM/salvage-hoisted-binop.ll45
-rw-r--r--llvm/test/Transforms/LICM/salvage-hoisted-gep.ll58
-rw-r--r--llvm/test/Transforms/LICM/salvage-hoisted-sub.ll70
-rw-r--r--llvm/test/Transforms/LoopDistribute/salvage-dbg-values-in-distributed-loops.ll77
-rw-r--r--llvm/test/Transforms/LoopUnroll/peel-last-iteration.ll431
-rw-r--r--llvm/test/Transforms/LoopVersioningLICM/load-from-unknown-address.ll307
-rw-r--r--llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll8
-rw-r--r--llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll163
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/reordered-interleaved-loads.ll134
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/unordered-loads-operands.ll71
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/long-pointer-distance.ll21
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll47
-rw-r--r--llvm/test/Transforms/SimplifyCFG/hoist-sink-swifterror-store.ll92
-rw-r--r--llvm/test/tools/dsymutil/ARM/swiftmodule.test5
-rwxr-xr-xllvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exebin0 -> 8528 bytes
-rw-r--r--llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe.gsymbin0 -> 536 bytes
-rw-r--r--llvm/test/tools/llvm-symbolizer/sym-gsymonly.test93
-rw-r--r--llvm/tools/dsymutil/DwarfLinkerForBinary.cpp5
-rw-r--r--llvm/tools/llvm-symbolizer/Opts.td5
-rw-r--r--llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp2
-rw-r--r--llvm/unittests/Support/TrailingObjectsTest.cpp40
-rw-r--r--llvm/unittests/TargetParser/RISCVISAInfoTest.cpp1
-rw-r--r--llvm/unittests/Transforms/Utils/CloningTest.cpp19
-rw-r--r--llvm/unittests/Transforms/Vectorize/VPlanTestBase.h4
-rw-r--r--llvm/utils/TableGen/AsmMatcherEmitter.cpp47
-rw-r--r--llvm/utils/TableGen/AsmWriterEmitter.cpp16
-rw-r--r--llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp10
-rw-r--r--llvm/utils/TableGen/Basic/VTEmitter.cpp3
-rw-r--r--llvm/utils/TableGen/CodeEmitterGen.cpp5
-rw-r--r--llvm/utils/TableGen/CodeGenMapTable.cpp8
-rw-r--r--llvm/utils/TableGen/Common/AsmWriterInst.h2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp20
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.h2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenHwModes.cpp2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstAlias.cpp12
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstruction.cpp33
-rw-r--r--llvm/utils/TableGen/Common/CodeGenSchedule.cpp9
-rw-r--r--llvm/utils/TableGen/Common/CodeGenSchedule.h2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenTarget.cpp2
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp12
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h5
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp3
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp3
-rw-r--r--llvm/utils/TableGen/CompressInstEmitter.cpp3
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherEmitter.cpp15
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherGen.cpp2
-rw-r--r--llvm/utils/TableGen/DFAEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/DFAPacketizerEmitter.cpp12
-rw-r--r--llvm/utils/TableGen/DecoderEmitter.cpp13
-rw-r--r--llvm/utils/TableGen/ExegesisEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/FastISelEmitter.cpp26
-rw-r--r--llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/GlobalISelEmitter.cpp12
-rw-r--r--llvm/utils/TableGen/InstrDocsEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp3
-rw-r--r--llvm/utils/TableGen/OptionParserEmitter.cpp16
-rw-r--r--llvm/utils/TableGen/PseudoLoweringEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/RegisterBankEmitter.cpp3
-rw-r--r--llvm/utils/TableGen/RegisterInfoEmitter.cpp10
-rw-r--r--llvm/utils/TableGen/SearchableTableEmitter.cpp24
-rw-r--r--llvm/utils/TableGen/SubtargetEmitter.cpp12
-rw-r--r--llvm/utils/TableGen/X86RecognizableInstr.cpp4
-rw-r--r--llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/lldb/test/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn26
-rw-r--r--llvm/utils/gn/secondary/lldb/tools/lldb-dap/tool/BUILD.gn35
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/DebugInfo/GSYM/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn8
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td105
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td64
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml70
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td209
-rw-r--r--mlir/include/mlir/Dialect/MLProgram/IR/MLProgramAttributes.td3
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/VectorOps.td6
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td1
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h64
-rw-r--r--mlir/include/mlir/IR/Operation.h5
-rw-r--r--mlir/include/mlir/Interfaces/VectorInterfaces.td10
-rw-r--r--mlir/include/mlir/Tools/PDLL/AST/Nodes.h52
-rw-r--r--mlir/lib/Analysis/Presburger/IntegerRelation.cpp3
-rw-r--r--mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp1
-rw-r--r--mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp12
-rw-r--r--mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp16
-rw-r--r--mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp3
-rw-r--r--mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp22
-rw-r--r--mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp10
-rw-r--r--mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp4
-rw-r--r--mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp3
-rw-r--r--mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp10
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp33
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp28
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp312
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp10
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp6
-rw-r--r--mlir/lib/Dialect/MLProgram/IR/MLProgramDialect.cpp8
-rw-r--r--mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp2
-rw-r--r--mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp4
-rw-r--r--mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp3
-rw-r--r--mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp4
-rw-r--r--mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp4
-rw-r--r--mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp4
-rw-r--r--mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp3
-rw-r--r--mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp3
-rw-r--r--mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp3
-rw-r--r--mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp3
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/IR/VectorOps.cpp33
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp22
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp10
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp6
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp38
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp22
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp14
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp2
-rw-r--r--mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp1
-rw-r--r--mlir/lib/Dialect/XeGPU/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp3
-rw-r--r--mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp427
-rw-r--r--mlir/lib/ExecutionEngine/SyclRuntimeWrappers.cpp3
-rw-r--r--mlir/lib/IR/AffineMapDetail.h8
-rw-r--r--mlir/lib/IR/Location.cpp11
-rw-r--r--mlir/lib/IR/TypeDetail.h9
-rw-r--r--mlir/lib/Target/LLVM/NVVM/Target.cpp3
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleImport.cpp3
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleTranslation.cpp3
-rw-r--r--mlir/lib/Transforms/OpStats.cpp4
-rw-r--r--mlir/python/mlir/dialects/linalg/__init__.py74
-rw-r--r--mlir/test/Dialect/LLVMIR/nvvm.mlir9
-rw-r--r--mlir/test/Dialect/Linalg/generalize-named-ops.mlir28
-rw-r--r--mlir/test/Dialect/Linalg/invalid.mlir207
-rw-r--r--mlir/test/Dialect/Linalg/named-ops.mlir169
-rw-r--r--mlir/test/Dialect/SCF/canonicalize.mlir8
-rw-r--r--mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir161
-rw-r--r--mlir/test/Target/LLVMIR/nvvmir.mlir22
-rw-r--r--mlir/test/lib/Dialect/CMakeLists.txt1
-rw-r--r--mlir/test/lib/Dialect/Test/TestOpDefs.cpp3
-rw-r--r--mlir/test/lib/Dialect/XeGPU/CMakeLists.txt16
-rw-r--r--mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp124
-rw-r--r--mlir/test/python/dialects/linalg/ops.py287
-rw-r--r--mlir/tools/mlir-opt/CMakeLists.txt1
-rw-r--r--mlir/tools/mlir-opt/mlir-opt.cpp2
-rw-r--r--offload/test/offloading/fortran/target-defaultmap-present.f9034
-rw-r--r--offload/test/offloading/fortran/target-defaultmap.f90166
-rw-r--r--polly/lib/Analysis/DependenceInfo.cpp2
-rw-r--r--polly/lib/Analysis/ScopDetection.cpp12
-rw-r--r--utils/bazel/llvm-project-overlay/lldb/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel16
912 files changed, 98908 insertions, 51162 deletions
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index 47e2016..6f3b592 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -142,8 +142,8 @@ public:
/// Types of profile the function can use. Could be a combination.
enum {
PF_NONE = 0, /// No profile.
- PF_LBR = 1, /// Profile is based on last branch records.
- PF_IP = 2, /// Non-LBR sample-based profile.
+ PF_BRANCH = 1, /// Profile is based on branches or branch stacks.
+ PF_BASIC = 2, /// Non-branch IP sample-based profile.
PF_MEMEVENT = 4, /// Profile has mem events.
};
diff --git a/bolt/include/bolt/Profile/ProfileYAMLMapping.h b/bolt/include/bolt/Profile/ProfileYAMLMapping.h
index f45d411..a8d9a15 100644
--- a/bolt/include/bolt/Profile/ProfileYAMLMapping.h
+++ b/bolt/include/bolt/Profile/ProfileYAMLMapping.h
@@ -230,8 +230,8 @@ LLVM_YAML_STRONG_TYPEDEF(uint16_t, PROFILE_PF)
template <> struct ScalarBitSetTraits<PROFILE_PF> {
static void bitset(IO &io, PROFILE_PF &value) {
- io.bitSetCase(value, "lbr", BinaryFunction::PF_LBR);
- io.bitSetCase(value, "sample", BinaryFunction::PF_IP);
+ io.bitSetCase(value, "lbr", BinaryFunction::PF_BRANCH);
+ io.bitSetCase(value, "sample", BinaryFunction::PF_BASIC);
io.bitSetCase(value, "memevent", BinaryFunction::PF_MEMEVENT);
}
};
diff --git a/bolt/lib/Core/BinaryFunctionProfile.cpp b/bolt/lib/Core/BinaryFunctionProfile.cpp
index 726da6a..9409106 100644
--- a/bolt/lib/Core/BinaryFunctionProfile.cpp
+++ b/bolt/lib/Core/BinaryFunctionProfile.cpp
@@ -70,7 +70,7 @@ void BinaryFunction::postProcessProfile() {
return;
}
- if (!(getProfileFlags() & PF_LBR))
+ if (!(getProfileFlags() & PF_BRANCH))
return;
// If we have at least some branch data for the function indicate that it
diff --git a/bolt/lib/Passes/MCF.cpp b/bolt/lib/Passes/MCF.cpp
index cd254a4..4f3a964 100644
--- a/bolt/lib/Passes/MCF.cpp
+++ b/bolt/lib/Passes/MCF.cpp
@@ -458,7 +458,7 @@ void EstimateEdgeCounts::runOnFunction(BinaryFunction &BF) {
Error EstimateEdgeCounts::runOnFunctions(BinaryContext &BC) {
if (llvm::none_of(llvm::make_second_range(BC.getBinaryFunctions()),
[](const BinaryFunction &BF) {
- return BF.getProfileFlags() == BinaryFunction::PF_IP;
+ return BF.getProfileFlags() == BinaryFunction::PF_BASIC;
}))
return Error::success();
@@ -466,7 +466,7 @@ Error EstimateEdgeCounts::runOnFunctions(BinaryContext &BC) {
runOnFunction(BF);
};
ParallelUtilities::PredicateTy SkipFunc = [&](const BinaryFunction &BF) {
- return BF.getProfileFlags() != BinaryFunction::PF_IP;
+ return BF.getProfileFlags() != BinaryFunction::PF_BASIC;
};
ParallelUtilities::runOnEachFunction(
diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp
index 27c250b..a3671a4 100644
--- a/bolt/lib/Profile/DataAggregator.cpp
+++ b/bolt/lib/Profile/DataAggregator.cpp
@@ -568,11 +568,11 @@ void DataAggregator::processProfile(BinaryContext &BC) {
for (auto &BFI : BC.getBinaryFunctions()) {
BinaryFunction &BF = BFI.second;
if (FuncBranchData *FBD = getBranchData(BF)) {
- BF.markProfiled(BinaryFunction::PF_LBR);
+ BF.markProfiled(BinaryFunction::PF_BRANCH);
BF.RawSampleCount = FBD->getNumExecutedBranches();
} else if (FuncBasicSampleData *FSD =
getFuncBasicSampleData(BF.getNames())) {
- BF.markProfiled(BinaryFunction::PF_IP);
+ BF.markProfiled(BinaryFunction::PF_BASIC);
BF.RawSampleCount = FSD->getSamples();
}
}
@@ -2240,8 +2240,8 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
for (const StringMapEntry<std::nullopt_t> &EventEntry : EventNames)
EventNamesOS << LS << EventEntry.first().str();
- BP.Header.Flags =
- opts::BasicAggregation ? BinaryFunction::PF_IP : BinaryFunction::PF_LBR;
+ BP.Header.Flags = opts::BasicAggregation ? BinaryFunction::PF_BASIC
+ : BinaryFunction::PF_BRANCH;
// Add probe inline tree nodes.
YAMLProfileWriter::InlineTreeDesc InlineTree;
diff --git a/bolt/lib/Profile/DataReader.cpp b/bolt/lib/Profile/DataReader.cpp
index 3376bef..198f7d8 100644
--- a/bolt/lib/Profile/DataReader.cpp
+++ b/bolt/lib/Profile/DataReader.cpp
@@ -358,12 +358,12 @@ void DataReader::readProfile(BinaryFunction &BF) {
return;
if (!hasLBR()) {
- BF.ProfileFlags = BinaryFunction::PF_IP;
+ BF.ProfileFlags = BinaryFunction::PF_BASIC;
readBasicSampleData(BF);
return;
}
- BF.ProfileFlags = BinaryFunction::PF_LBR;
+ BF.ProfileFlags = BinaryFunction::PF_BRANCH;
// Possibly assign/re-assign branch profile data.
matchProfileData(BF);
@@ -1035,9 +1035,8 @@ ErrorOr<BasicSampleInfo> DataReader::parseSampleInfo() {
}
ErrorOr<bool> DataReader::maybeParseNoLBRFlag() {
- if (ParsingBuf.size() < 6 || ParsingBuf.substr(0, 6) != "no_lbr")
+ if (!ParsingBuf.consume_front("no_lbr"))
return false;
- ParsingBuf = ParsingBuf.drop_front(6);
Col += 6;
if (ParsingBuf.size() > 0 && ParsingBuf[0] == ' ')
@@ -1058,9 +1057,8 @@ ErrorOr<bool> DataReader::maybeParseNoLBRFlag() {
}
ErrorOr<bool> DataReader::maybeParseBATFlag() {
- if (ParsingBuf.size() < 16 || ParsingBuf.substr(0, 16) != "boltedcollection")
+ if (!ParsingBuf.consume_front("boltedcollection"))
return false;
- ParsingBuf = ParsingBuf.drop_front(16);
Col += 16;
if (!checkAndConsumeNewLine()) {
diff --git a/bolt/lib/Profile/YAMLProfileReader.cpp b/bolt/lib/Profile/YAMLProfileReader.cpp
index 1cdc51a..821f8c3 100644
--- a/bolt/lib/Profile/YAMLProfileReader.cpp
+++ b/bolt/lib/Profile/YAMLProfileReader.cpp
@@ -221,7 +221,7 @@ bool YAMLProfileReader::parseFunctionProfile(
// Basic samples profile (without LBR) does not have branches information
// and needs a special processing.
- if (YamlBP.Header.Flags & BinaryFunction::PF_IP) {
+ if (YamlBP.Header.Flags & BinaryFunction::PF_BASIC) {
if (!YamlBB.EventCount) {
BB.setExecutionCount(0);
continue;
@@ -338,7 +338,7 @@ bool YAMLProfileReader::parseFunctionProfile(
if (BB.getExecutionCount() == BinaryBasicBlock::COUNT_NO_PROFILE)
BB.setExecutionCount(0);
- if (YamlBP.Header.Flags & BinaryFunction::PF_IP)
+ if (YamlBP.Header.Flags & BinaryFunction::PF_BASIC)
BF.setExecutionCount(FunctionExecutionCount);
ProfileMatched &= !MismatchedBlocks && !MismatchedCalls && !MismatchedEdges;
diff --git a/bolt/lib/Profile/YAMLProfileWriter.cpp b/bolt/lib/Profile/YAMLProfileWriter.cpp
index 2bdc7b6..f1fe45f 100644
--- a/bolt/lib/Profile/YAMLProfileWriter.cpp
+++ b/bolt/lib/Profile/YAMLProfileWriter.cpp
@@ -215,7 +215,7 @@ YAMLProfileWriter::convert(const BinaryFunction &BF, bool UseDFS,
const MCPseudoProbeDecoder *PseudoProbeDecoder =
opts::ProfileWritePseudoProbes ? BC.getPseudoProbeDecoder() : nullptr;
- const uint16_t LBRProfile = BF.getProfileFlags() & BinaryFunction::PF_LBR;
+ const uint16_t LBRProfile = BF.getProfileFlags() & BinaryFunction::PF_BRANCH;
// Prepare function and block hashes
BF.computeHash(UseDFS);
diff --git a/bolt/lib/Rewrite/BuildIDRewriter.cpp b/bolt/lib/Rewrite/BuildIDRewriter.cpp
index 83d0c9b..d50416f 100644
--- a/bolt/lib/Rewrite/BuildIDRewriter.cpp
+++ b/bolt/lib/Rewrite/BuildIDRewriter.cpp
@@ -78,8 +78,7 @@ Error BuildIDRewriter::sectionInitializer() {
"out of bounds while reading note section: %s",
toString(Cursor.takeError()).c_str());
- if (Type == ELF::NT_GNU_BUILD_ID && Name.substr(0, 3) == "GNU" &&
- DescSz) {
+ if (Type == ELF::NT_GNU_BUILD_ID && Name.starts_with("GNU") && DescSz) {
BuildIDSection = NoteSection;
BuildID = Desc;
BC.setFileBuildID(getPrintableBuildID(Desc));
diff --git a/bolt/tools/merge-fdata/merge-fdata.cpp b/bolt/tools/merge-fdata/merge-fdata.cpp
index 0ff0e09..cfcb937 100644
--- a/bolt/tools/merge-fdata/merge-fdata.cpp
+++ b/bolt/tools/merge-fdata/merge-fdata.cpp
@@ -124,8 +124,8 @@ void mergeProfileHeaders(BinaryProfileHeader &MergedHeader,
if (!MergedHeader.Flags)
MergedHeader.Flags = Header.Flags;
- constexpr auto Mask =
- llvm::bolt::BinaryFunction::PF_LBR | llvm::bolt::BinaryFunction::PF_IP;
+ constexpr auto Mask = llvm::bolt::BinaryFunction::PF_BRANCH |
+ llvm::bolt::BinaryFunction::PF_BASIC;
if ((MergedHeader.Flags & Mask) != (Header.Flags & Mask)) {
errs() << "ERROR: cannot merge LBR profile with non-LBR profile\n";
exit(1);
diff --git a/clang-tools-extra/clangd/ClangdServer.cpp b/clang-tools-extra/clangd/ClangdServer.cpp
index 5284412..b499683 100644
--- a/clang-tools-extra/clangd/ClangdServer.cpp
+++ b/clang-tools-extra/clangd/ClangdServer.cpp
@@ -457,6 +457,7 @@ void ClangdServer::codeComplete(PathRef File, Position Pos,
CodeCompleteOpts.ArgumentLists = Config::current().Completion.ArgumentLists;
CodeCompleteOpts.InsertIncludes =
Config::current().Completion.HeaderInsertion;
+ CodeCompleteOpts.CodePatterns = Config::current().Completion.CodePatterns;
// FIXME(ibiryukov): even if Preamble is non-null, we may want to check
// both the old and the new version in case only one of them matches.
CodeCompleteResult Result = clangd::codeComplete(
diff --git a/clang-tools-extra/clangd/CodeComplete.cpp b/clang-tools-extra/clangd/CodeComplete.cpp
index 0eb196f..14679fe 100644
--- a/clang-tools-extra/clangd/CodeComplete.cpp
+++ b/clang-tools-extra/clangd/CodeComplete.cpp
@@ -926,7 +926,8 @@ struct CompletionRecorder : public CodeCompleteConsumer {
// FIXME: in case there is no future sema completion callback after the
// recovery mode, we might still want to provide some results (e.g. trivial
// identifier-based completion).
- if (Context.getKind() == CodeCompletionContext::CCC_Recovery) {
+ CodeCompletionContext::Kind ContextKind = Context.getKind();
+ if (ContextKind == CodeCompletionContext::CCC_Recovery) {
log("Code complete: Ignoring sema code complete callback with Recovery "
"context.");
return;
@@ -950,6 +951,12 @@ struct CompletionRecorder : public CodeCompleteConsumer {
// Retain the results we might want.
for (unsigned I = 0; I < NumResults; ++I) {
auto &Result = InResults[I];
+ if (Config::current().Completion.CodePatterns ==
+ Config::CodePatternsPolicy::None &&
+ Result.Kind == CodeCompletionResult::RK_Pattern &&
+ // keep allowing the include files autocomplete suggestions
+ ContextKind != CodeCompletionContext::CCC_IncludedFile)
+ continue;
// Class members that are shadowed by subclasses are usually noise.
if (Result.Hidden && Result.Declaration &&
Result.Declaration->isCXXClassMember())
@@ -2153,7 +2160,8 @@ private:
clang::CodeCompleteOptions CodeCompleteOptions::getClangCompleteOpts() const {
clang::CodeCompleteOptions Result;
- Result.IncludeCodePatterns = EnableSnippets;
+ Result.IncludeCodePatterns =
+ EnableSnippets && (CodePatterns != Config::CodePatternsPolicy::None);
Result.IncludeMacros = true;
Result.IncludeGlobals = true;
// We choose to include full comments and not do doxygen parsing in
diff --git a/clang-tools-extra/clangd/CodeComplete.h b/clang-tools-extra/clangd/CodeComplete.h
index 83d3470..1cf3b41 100644
--- a/clang-tools-extra/clangd/CodeComplete.h
+++ b/clang-tools-extra/clangd/CodeComplete.h
@@ -111,6 +111,9 @@ struct CodeCompleteOptions {
Config::ArgumentListsPolicy ArgumentLists =
Config::ArgumentListsPolicy::FullPlaceholders;
+ /// Whether to suggest code patterns & snippets or not in completion
+ Config::CodePatternsPolicy CodePatterns = Config::CodePatternsPolicy::All;
+
/// Whether to use the clang parser, or fallback to text-based completion
/// (using identifiers in the current file and symbol indexes).
enum CodeCompletionParse {
diff --git a/clang-tools-extra/clangd/CompileCommands.cpp b/clang-tools-extra/clangd/CompileCommands.cpp
index 207e4c3..808d899 100644
--- a/clang-tools-extra/clangd/CompileCommands.cpp
+++ b/clang-tools-extra/clangd/CompileCommands.cpp
@@ -404,8 +404,7 @@ enum DriverMode : unsigned char {
DriverMode getDriverMode(const std::vector<std::string> &Args) {
DriverMode Mode = DM_GCC;
llvm::StringRef Argv0 = Args.front();
- if (Argv0.ends_with_insensitive(".exe"))
- Argv0 = Argv0.drop_back(strlen(".exe"));
+ Argv0.consume_back_insensitive(".exe");
if (Argv0.ends_with_insensitive("cl"))
Mode = DM_CL;
for (const llvm::StringRef Arg : Args) {
diff --git a/clang-tools-extra/clangd/Config.h b/clang-tools-extra/clangd/Config.h
index 2891a6d..83e0fce 100644
--- a/clang-tools-extra/clangd/Config.h
+++ b/clang-tools-extra/clangd/Config.h
@@ -152,6 +152,11 @@ struct Config {
NeverInsert // Never insert headers as part of code completion
};
+ enum class CodePatternsPolicy {
+ All, // Suggest all code patterns and snippets
+ None // Suggest none of the code patterns and snippets
+ };
+
/// Configures code completion feature.
struct {
/// Whether code completion includes results that are not visible in current
@@ -161,6 +166,8 @@ struct Config {
ArgumentListsPolicy ArgumentLists = ArgumentListsPolicy::FullPlaceholders;
/// Controls if headers should be inserted when completions are accepted
HeaderInsertionPolicy HeaderInsertion = HeaderInsertionPolicy::IWYU;
+ /// Controls whether code patterns & snippets are suggested.
+ CodePatternsPolicy CodePatterns = CodePatternsPolicy::All;
} Completion;
/// Configures hover feature.
diff --git a/clang-tools-extra/clangd/ConfigCompile.cpp b/clang-tools-extra/clangd/ConfigCompile.cpp
index 13c2405..35d35f7 100644
--- a/clang-tools-extra/clangd/ConfigCompile.cpp
+++ b/clang-tools-extra/clangd/ConfigCompile.cpp
@@ -707,6 +707,17 @@ struct FragmentCompiler {
C.Completion.HeaderInsertion = *Val;
});
}
+
+ if (F.CodePatterns) {
+ if (auto Val = compileEnum<Config::CodePatternsPolicy>("CodePatterns",
+ *F.CodePatterns)
+ .map("All", Config::CodePatternsPolicy::All)
+ .map("None", Config::CodePatternsPolicy::None)
+ .value())
+ Out.Apply.push_back([Val](const Params &, Config &C) {
+ C.Completion.CodePatterns = *Val;
+ });
+ }
}
void compile(Fragment::HoverBlock &&F) {
diff --git a/clang-tools-extra/clangd/ConfigFragment.h b/clang-tools-extra/clangd/ConfigFragment.h
index 2363b48..9e00dbc 100644
--- a/clang-tools-extra/clangd/ConfigFragment.h
+++ b/clang-tools-extra/clangd/ConfigFragment.h
@@ -349,6 +349,11 @@ struct Fragment {
/// symbol is forward-declared
/// "Never": Never insert headers
std::optional<Located<std::string>> HeaderInsertion;
+ /// Whether to suggest code patterns & snippets.
+ /// Values are Config::CodePatternsPolicy:
+ /// All => enable all code patterns and snippets suggestion
+ /// None => disable all code patterns and snippets suggestion
+ std::optional<Located<std::string>> CodePatterns;
};
CompletionBlock Completion;
diff --git a/clang-tools-extra/clangd/ConfigYAML.cpp b/clang-tools-extra/clangd/ConfigYAML.cpp
index 47c6e1c..ff457d8 100644
--- a/clang-tools-extra/clangd/ConfigYAML.cpp
+++ b/clang-tools-extra/clangd/ConfigYAML.cpp
@@ -249,6 +249,10 @@ private:
if (auto HeaderInsertion = scalarValue(N, "HeaderInsertion"))
F.HeaderInsertion = *HeaderInsertion;
});
+ Dict.handle("CodePatterns", [&](Node &N) {
+ if (auto CodePatterns = scalarValue(N, "CodePatterns"))
+ F.CodePatterns = *CodePatterns;
+ });
Dict.parse(N);
}
diff --git a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
index 718bee2..311f0d9 100644
--- a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
+++ b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
@@ -3326,6 +3326,40 @@ TEST(CompletionTest, AllScopesCompletion) {
kind(CompletionItemKind::EnumMember))));
}
+TEST(CompletionTest, NoCodePatternsIfDisabled) {
+ clangd::CodeCompleteOptions Opts = {};
+ Opts.EnableSnippets = true;
+ Opts.CodePatterns = Config::CodePatternsPolicy::None;
+
+ auto Results = completions(R"cpp(
+ void function() {
+ /// Trying to trigger "for (init-statement; condition; inc-expression)
+ /// {statements}~" code pattern
+ for^
+ }
+ )cpp",
+ {}, Opts);
+
+ EXPECT_THAT(Results.Completions,
+ Not(Contains(kind(CompletionItemKind::Snippet))));
+}
+
+TEST(CompletionTest, CompleteIncludeIfCodePatternsNone) {
+ clangd::CodeCompleteOptions Opts = {};
+ Opts.EnableSnippets = true;
+ Opts.CodePatterns = Config::CodePatternsPolicy::None;
+
+ Annotations Test(R"cpp(#include "^)cpp");
+ auto TU = TestTU::withCode(Test.code());
+ TU.AdditionalFiles["foo/bar.h"] = "";
+ TU.ExtraArgs.push_back("-I" + testPath("foo"));
+
+ auto Results = completions(TU, Test.point(), {}, Opts);
+ EXPECT_THAT(Results.Completions,
+ AllOf(has("foo/", CompletionItemKind::Folder),
+ has("bar.h\"", CompletionItemKind::File)));
+}
+
TEST(CompletionTest, NoQualifierIfShadowed) {
clangd::CodeCompleteOptions Opts = {};
Opts.AllScopes = true;
diff --git a/clang-tools-extra/clangd/unittests/ConfigYAMLTests.cpp b/clang-tools-extra/clangd/unittests/ConfigYAMLTests.cpp
index 979d7254..d71b8d5 100644
--- a/clang-tools-extra/clangd/unittests/ConfigYAMLTests.cpp
+++ b/clang-tools-extra/clangd/unittests/ConfigYAMLTests.cpp
@@ -217,6 +217,19 @@ Completion:
EXPECT_THAT(Results[0].Completion.AllScopes, testing::Eq(std::nullopt));
}
+TEST(ParseYAML, CodePatterns) {
+ CapturedDiags Diags;
+ Annotations YAML(R"yaml(
+ Completion:
+ CodePatterns: None
+ )yaml");
+ auto Results =
+ Fragment::parseYAML(YAML.code(), "config.yaml", Diags.callback());
+ ASSERT_THAT(Diags.Diagnostics, IsEmpty());
+ ASSERT_EQ(Results.size(), 1u);
+ EXPECT_THAT(Results[0].Completion.CodePatterns, llvm::ValueIs(val("None")));
+}
+
TEST(ParseYAML, ShowAKA) {
CapturedDiags Diags;
Annotations YAML(R"yaml(
diff --git a/clang-tools-extra/modularize/Modularize.cpp b/clang-tools-extra/modularize/Modularize.cpp
index 8609692..7f8a192 100644
--- a/clang-tools-extra/modularize/Modularize.cpp
+++ b/clang-tools-extra/modularize/Modularize.cpp
@@ -406,11 +406,8 @@ struct Location {
}
friend bool operator<(const Location &X, const Location &Y) {
- if (X.File != Y.File)
- return X.File < Y.File;
- if (X.Line != Y.Line)
- return X.Line < Y.Line;
- return X.Column < Y.Column;
+ return std::tie(X.File, X.Line, X.Column) <
+ std::tie(Y.File, Y.Line, Y.Column);
}
friend bool operator>(const Location &X, const Location &Y) { return Y < X; }
friend bool operator<=(const Location &X, const Location &Y) {
diff --git a/clang-tools-extra/modularize/PreprocessorTracker.cpp b/clang-tools-extra/modularize/PreprocessorTracker.cpp
index 0c030f1..336f570 100644
--- a/clang-tools-extra/modularize/PreprocessorTracker.cpp
+++ b/clang-tools-extra/modularize/PreprocessorTracker.cpp
@@ -494,19 +494,8 @@ public:
return Column == Other.Column;
}
bool operator<(const PPItemKey &Other) const {
- if (Name < Other.Name)
- return true;
- else if (Name > Other.Name)
- return false;
- if (File < Other.File)
- return true;
- else if (File > Other.File)
- return false;
- if (Line < Other.Line)
- return true;
- else if (Line > Other.Line)
- return false;
- return Column < Other.Column;
+ return std::tie(Name, File, Line, Column) <
+ std::tie(Other.Name, Other.File, Other.Line, Other.Column);
}
StringHandle Name;
HeaderHandle File;
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index e362ec5..fb03181 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -521,6 +521,9 @@ Improvements to Clang's diagnostics
- Fixed a duplicate diagnostic when performing typo correction on function template
calls with explicit template arguments. (#GH139226)
+- An error is now emitted when OpenMP ``collapse`` and ``ordered`` clauses have an
+ argument larger than what can fit within a 64-bit integer.
+
Improvements to Clang's time-trace
----------------------------------
@@ -920,6 +923,9 @@ OpenMP Support
``partial`` was an invalid expression. (#GH139267)
- Fixed a crashing bug with ``omp tile sizes`` if the argument to ``sizes`` was
an invalid expression. (#GH139073)
+- Fixed a crashing bug with ``omp simd collapse`` if the argument to
+ ``collapse`` was an invalid expression. (#GH138493)
+- Fixed a crashing bug with a malformed ``cancel`` directive. (#GH139360)
- Fixed a crashing bug with ``omp distribute dist_schedule`` if the argument to
``dist_schedule`` was not strictly positive. (#GH139266)
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 3faf63e..f1013c5 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -41,6 +41,7 @@
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Frontend/HLSL/HLSLRootSignature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
@@ -5178,6 +5179,42 @@ public:
friend class ASTDeclWriter;
};
+class HLSLRootSignatureDecl final
+ : public NamedDecl,
+ private llvm::TrailingObjects<HLSLRootSignatureDecl,
+ llvm::hlsl::rootsig::RootElement> {
+ friend TrailingObjects;
+
+ unsigned NumElems;
+
+ llvm::hlsl::rootsig::RootElement *getElems() {
+ return getTrailingObjects<llvm::hlsl::rootsig::RootElement>();
+ }
+
+ const llvm::hlsl::rootsig::RootElement *getElems() const {
+ return getTrailingObjects<llvm::hlsl::rootsig::RootElement>();
+ }
+
+ HLSLRootSignatureDecl(DeclContext *DC, SourceLocation Loc, IdentifierInfo *ID,
+ unsigned NumElems);
+
+public:
+ static HLSLRootSignatureDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation Loc, IdentifierInfo *ID,
+ ArrayRef<llvm::hlsl::rootsig::RootElement> RootElements);
+
+ static HLSLRootSignatureDecl *CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID);
+
+ ArrayRef<llvm::hlsl::rootsig::RootElement> getRootElements() const {
+ return {getElems(), NumElems};
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == HLSLRootSignature; }
+};
+
/// Insertion operator for diagnostics. This allows sending NamedDecl's
/// into a diagnostic with <<.
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h
index a8100b6..80c9768 100644
--- a/clang/include/clang/AST/DeclTemplate.h
+++ b/clang/include/clang/AST/DeclTemplate.h
@@ -781,16 +781,15 @@ protected:
bool loadLazySpecializationsImpl(llvm::ArrayRef<TemplateArgument> Args,
TemplateParameterList *TPL = nullptr) const;
- template <class EntryType, typename ...ProfileArguments>
- typename SpecEntryTraits<EntryType>::DeclType*
+ template <class EntryType, typename... ProfileArguments>
+ typename SpecEntryTraits<EntryType>::DeclType *
findSpecializationImpl(llvm::FoldingSetVector<EntryType> &Specs,
- void *&InsertPos, ProfileArguments &&...ProfileArgs);
+ void *&InsertPos, ProfileArguments... ProfileArgs);
template <class EntryType, typename... ProfileArguments>
typename SpecEntryTraits<EntryType>::DeclType *
findSpecializationLocally(llvm::FoldingSetVector<EntryType> &Specs,
- void *&InsertPos,
- ProfileArguments &&...ProfileArgs);
+ void *&InsertPos, ProfileArguments... ProfileArgs);
template <class Derived, class EntryType>
void addSpecializationImpl(llvm::FoldingSetVector<EntryType> &Specs,
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 3edc868..23a8c4f 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -1599,6 +1599,8 @@ DEF_TRAVERSE_DECL(EmptyDecl, {})
DEF_TRAVERSE_DECL(HLSLBufferDecl, {})
+DEF_TRAVERSE_DECL(HLSLRootSignatureDecl, {})
+
DEF_TRAVERSE_DECL(LifetimeExtendedTemporaryDecl, {
TRY_TO(TraverseStmt(D->getTemporaryExpr()));
})
diff --git a/clang/include/clang/AST/TextNodeDumper.h b/clang/include/clang/AST/TextNodeDumper.h
index ea3a0f05..1917a8a 100644
--- a/clang/include/clang/AST/TextNodeDumper.h
+++ b/clang/include/clang/AST/TextNodeDumper.h
@@ -408,6 +408,7 @@ public:
void
VisitLifetimeExtendedTemporaryDecl(const LifetimeExtendedTemporaryDecl *D);
void VisitHLSLBufferDecl(const HLSLBufferDecl *D);
+ void VisitHLSLRootSignatureDecl(const HLSLRootSignatureDecl *D);
void VisitHLSLOutArgExpr(const HLSLOutArgExpr *E);
void VisitOpenACCConstructStmt(const OpenACCConstructStmt *S);
void VisitOpenACCLoopConstruct(const OpenACCLoopConstruct *S);
diff --git a/clang/include/clang/Analysis/CFG.h b/clang/include/clang/Analysis/CFG.h
index a7ff38c..e70c703 100644
--- a/clang/include/clang/Analysis/CFG.h
+++ b/clang/include/clang/Analysis/CFG.h
@@ -122,7 +122,8 @@ public:
return (Kind) x;
}
- void dumpToStream(llvm::raw_ostream &OS) const;
+ void dumpToStream(llvm::raw_ostream &OS,
+ bool TerminateWithNewLine = true) const;
void dump() const {
dumpToStream(llvm::errs());
@@ -695,6 +696,11 @@ class CFGBlock {
void dump() const {
dumpToStream(llvm::errs());
}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(Parent);
+ ID.AddInteger(Index);
+ }
};
template <bool IsReverse, bool IsConst> class ElementRefIterator {
@@ -1190,6 +1196,8 @@ public:
}
};
+using ConstCFGElementRef = CFGBlock::ConstCFGElementRef;
+
/// CFGCallback defines methods that should be called when a logical
/// operator error is found when building the CFG.
class CFGCallback {
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 37c80ac..ccd13a4 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -4735,6 +4735,17 @@ def Error : InheritableAttr {
let Documentation = [ErrorAttrDocs];
}
+/// HLSL Root Signature Attribute
+def RootSignature : Attr {
+ /// [RootSignature(Signature)]
+ let Spellings = [Microsoft<"RootSignature">];
+ let Args = [IdentifierArgument<"Signature">];
+ let Subjects = SubjectList<[Function],
+ ErrorDiag, "'function'">;
+ let LangOpts = [HLSL];
+ let Documentation = [RootSignatureDocs];
+}
+
def HLSLNumThreads: InheritableAttr {
let Spellings = [Microsoft<"numthreads">];
let Args = [IntArgument<"X">, IntArgument<"Y">, IntArgument<"Z">];
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index cbb397c..5fb5f16 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -8195,6 +8195,17 @@ and https://microsoft.github.io/hlsl-specs/proposals/0013-wave-size-range.html
}];
}
+def RootSignatureDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``RootSignature`` attribute applies to HLSL entry functions to define what
+types of resources are bound to the graphics pipeline.
+
+For details about the use and specification of Root Signatures please see here:
+https://learn.microsoft.com/en-us/windows/win32/direct3d12/root-signatures
+ }];
+}
+
def NumThreadsDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
diff --git a/clang/include/clang/Basic/DeclNodes.td b/clang/include/clang/Basic/DeclNodes.td
index 20debd67..f1ebaf1 100644
--- a/clang/include/clang/Basic/DeclNodes.td
+++ b/clang/include/clang/Basic/DeclNodes.td
@@ -111,5 +111,6 @@ def Empty : DeclNode<Decl>;
def RequiresExprBody : DeclNode<Decl>, DeclContext;
def LifetimeExtendedTemporary : DeclNode<Decl>;
def HLSLBuffer : DeclNode<Named, "HLSLBuffer">, DeclContext;
+def HLSLRootSignature : DeclNode<Named, "HLSLRootSignature">;
def OpenACCDeclare : DeclNode<Decl, "#pragma acc declare">;
def OpenACCRoutine : DeclNode<Decl, "#pragma acc routine">;
diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td
index 3bbdc49..fd52514 100644
--- a/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -1469,7 +1469,8 @@ def err_acc_expected_reduction_operator
def err_acc_invalid_reduction_operator
: Error<"invalid reduction operator, expected '+', '*', 'max', 'min', "
"'&', '|', '^', '&&', or '||'">;
-def err_acc_incorrect_bind_arg : Error<"expected identifier or string literal">;
+def err_acc_incorrect_bind_arg
+ : Error<"expected identifier or string literal in OpenACC 'bind' clause">;
def err_acc_modifier
: Error<"%enum_select<ACCModifier>{%Unknown{unknown}|%Duplicate{duplicate}}"
"0 modifier %1 in OpenACC modifier-list on '%2' clause">;
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index ca47cf6..3efe959 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -11526,6 +11526,8 @@ def note_omp_collapse_ordered_expr : Note<
"as specified in %select{'collapse'|'ordered'|'collapse' and 'ordered'}0 clause%select{||s}0">;
def err_omp_negative_expression_in_clause : Error<
"argument to '%0' clause must be a %select{non-negative|strictly positive}1 integer value">;
+def err_omp_large_expression_in_clause : Error<
+ "argument to '%0' clause requires a value that can be represented by a 64-bit">;
def err_omp_not_integral : Error<
"expression must have integral or unscoped enumeration "
"type, not %0">;
diff --git a/clang/include/clang/Basic/SourceManager.h b/clang/include/clang/Basic/SourceManager.h
index e0f1ea4..3762dbc 100644
--- a/clang/include/clang/Basic/SourceManager.h
+++ b/clang/include/clang/Basic/SourceManager.h
@@ -1529,6 +1529,15 @@ public:
return Filename == "<scratch space>";
}
+ /// Returns whether \p Loc is located in a built-in or command line source.
+ bool isInPredefinedFile(SourceLocation Loc) const {
+ PresumedLoc Presumed = getPresumedLoc(Loc);
+ if (Presumed.isInvalid())
+ return false;
+ StringRef Filename(Presumed.getFilename());
+ return Filename == "<built-in>" || Filename == "<command line>";
+ }
+
/// Returns if a SourceLocation is in a system header.
bool isInSystemHeader(SourceLocation Loc) const {
if (Loc.isInvalid())
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 7aff5ed..e8020b0 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1970,6 +1970,42 @@ def VecCreateOp : CIR_Op<"vec.create", [Pure]> {
}
//===----------------------------------------------------------------------===//
+// VecInsertOp
+//===----------------------------------------------------------------------===//
+
+def VecInsertOp : CIR_Op<"vec.insert", [Pure,
+ TypesMatchWith<"argument type matches vector element type", "vec", "value",
+ "cast<VectorType>($_self).getElementType()">,
+ AllTypesMatch<["result", "vec"]>]> {
+
+ let summary = "Insert one element into a vector object";
+ let description = [{
+ The `cir.vec.insert` operation produces a new vector by replacing
+ the element of the input vector at `index` with `value`.
+
+ ```mlir
+ %value = cir.const #cir.int<5> : !s32i
+ %index = cir.const #cir.int<2> : !s32i
+ %vec_tmp = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+ %new_vec = cir.vec.insert %value, %vec_tmp[%index : !s32i] : !cir.vector<4 x !s32i>
+ ```
+ }];
+
+ let arguments = (ins
+ CIR_VectorType:$vec,
+ AnyType:$value,
+ CIR_AnyFundamentalIntType:$index
+ );
+
+ let results = (outs CIR_VectorType:$result);
+
+ let assemblyFormat = [{
+ $value `,` $vec `[` $index `:` type($index) `]` attr-dict `:`
+ qualified(type($vec))
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// VecExtractOp
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index e148a0a..484822c 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -179,6 +179,7 @@ struct MissingFeatures {
static bool msabi() { return false; }
static bool typeChecks() { return false; }
static bool lambdaFieldToName() { return false; }
+ static bool updateCompletedType() { return false; }
static bool targetSpecificCXXABI() { return false; }
static bool moduleNameHash() { return false; }
static bool setDSOLocal() { return false; }
diff --git a/clang/include/clang/Driver/Compilation.h b/clang/include/clang/Driver/Compilation.h
index 36ae85c..26781fc 100644
--- a/clang/include/clang/Driver/Compilation.h
+++ b/clang/include/clang/Driver/Compilation.h
@@ -90,14 +90,8 @@ class Compilation {
: TC(TC), BoundArch(BoundArch), DeviceOffloadKind(DeviceOffloadKind) {}
bool operator<(const TCArgsKey &K) const {
- if (TC < K.TC)
- return true;
- else if (TC == K.TC && BoundArch < K.BoundArch)
- return true;
- else if (TC == K.TC && BoundArch == K.BoundArch &&
- DeviceOffloadKind < K.DeviceOffloadKind)
- return true;
- return false;
+ return std::tie(TC, BoundArch, DeviceOffloadKind) <
+ std::tie(K.TC, K.BoundArch, K.DeviceOffloadKind);
}
};
std::map<TCArgsKey, llvm::opt::DerivedArgList *> TCArgs;
diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h
index d2fec2b..00538fd 100644
--- a/clang/include/clang/Parse/Parser.h
+++ b/clang/include/clang/Parse/Parser.h
@@ -3093,6 +3093,7 @@ private:
return AttrsParsed;
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
+ void ParseMicrosoftRootSignatureAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &Attrs);
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
@@ -3537,6 +3538,9 @@ private:
DeclarationName &Name,
AccessSpecifier AS = AS_none);
+ /// Parses 'omp begin declare variant' directive.
+ bool ParseOpenMPDeclareBeginVariantDirective(SourceLocation Loc);
+
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
diff --git a/clang/include/clang/Sema/SemaHLSL.h b/clang/include/clang/Sema/SemaHLSL.h
index 5d260ac..e340547 100644
--- a/clang/include/clang/Sema/SemaHLSL.h
+++ b/clang/include/clang/Sema/SemaHLSL.h
@@ -119,6 +119,7 @@ public:
bool IsCompAssign);
void emitLogicalOperatorFixIt(Expr *LHS, Expr *RHS, BinaryOperatorKind Opc);
+ void handleRootSignatureAttr(Decl *D, const ParsedAttr &AL);
void handleNumThreadsAttr(Decl *D, const ParsedAttr &AL);
void handleWaveSizeAttr(Decl *D, const ParsedAttr &AL);
void handleSV_DispatchThreadIDAttr(Decl *D, const ParsedAttr &AL);
diff --git a/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h b/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
index 519d2d5..6c1025e 100644
--- a/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
+++ b/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
@@ -19,6 +19,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
namespace clang {
@@ -29,6 +30,13 @@ private:
ASTContext &ACtx;
ProgramStateRef State;
+ std::string printCFGElementRef(ConstCFGElementRef Elem) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ Elem->dumpToStream(OS, /*TerminateWithNewLine=*/false);
+ return Str;
+ }
+
std::string printStmt(const Stmt *S) {
std::string Str;
llvm::raw_string_ostream OS(Str);
@@ -114,7 +122,8 @@ public:
std::string VisitSymbolConjured(const SymbolConjured *S) {
return "symbol of type '" + S->getType().getAsString() +
- "' conjured at statement '" + printStmt(S->getStmt()) + "'";
+ "' conjured at CFG element '" +
+ printCFGElementRef(S->getCFGElementRef()) + "'";
}
std::string VisitSymbolDerived(const SymbolDerived *S) {
diff --git a/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def b/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
index f9f22a9..fab19c7 100644
--- a/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
+++ b/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
@@ -414,6 +414,19 @@ ANALYZER_OPTION(
"any target.",
true)
+ANALYZER_OPTION(
+ bool, InlineFunctionsWithAmbiguousLoops, "inline-functions-with-ambiguous-loops",
+ "If disabled (the default), the analyzer puts functions on a \"do not "
+ "inline this\" list if it finds an execution path within that function "
+ "that may potentially perform 'analyzer-max-loop' (= 4 by default) "
+ "iterations in a loop. (Note that functions that _definitely_ reach the "
+ "loop limit on some execution path are currently marked as \"do not "
+ "inline\" even if this option is enabled.) Enabling this option "
+ "eliminates this (somewhat arbitrary) restriction from the analysis "
+ "scope, which increases the analysis runtime (on average by ~10%, but "
+ "a few translation units may see much larger slowdowns).",
+ false)
+
//===----------------------------------------------------------------------===//
// Unsigned analyzer options.
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index bb33a69..63ca3ef 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -151,6 +151,8 @@ public:
return Pred->getSVal(S);
}
+ ConstCFGElementRef getCFGElementRef() const { return Eng.getCFGElementRef(); }
+
/// Returns true if the value of \p E is greater than or equal to \p
/// Val under unsigned comparison.
bool isGreaterOrEqual(const Expr *E, unsigned long long Val);
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 5f85525..2851941 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -226,7 +226,7 @@ public:
return (*G.roots_begin())->getLocation().getLocationContext();
}
- CFGBlock::ConstCFGElementRef getCFGElementRef() const {
+ ConstCFGElementRef getCFGElementRef() const {
const CFGBlock *blockPtr = currBldrCtx ? currBldrCtx->getBlock() : nullptr;
return {blockPtr, currStmtIdx};
}
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
index 3ee0d22..7613952 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
@@ -81,10 +81,6 @@ public:
I->second.MayInline = 0;
}
- void markReachedMaxBlockCount(const Decl *D) {
- markShouldNotInline(D);
- }
-
std::optional<bool> mayInline(const Decl *D) {
MapTy::const_iterator I = Map.find(D);
if (I != Map.end() && I->second.InlineChecked)
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h
index e75228f..50f2197 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h
@@ -27,7 +27,8 @@ namespace ento {
/// by the loop body in any iteration.
ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
const LocationContext *LCtx,
- unsigned BlockCount, const Stmt *LoopStmt);
+ unsigned BlockCount,
+ ConstCFGElementRef Elem);
} // end namespace ento
} // end namespace clang
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 4d66e08..5271453 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -313,7 +313,7 @@ public:
/// be triggered by this event.
///
/// \param Regions the set of regions to be invalidated.
- /// \param E the expression that caused the invalidation.
+ /// \param Elem The CFG Element that caused the invalidation.
/// \param BlockCount The number of times the current basic block has been
/// visited.
/// \param CausesPointerEscape the flag is set to true when the invalidation
@@ -325,16 +325,17 @@ public:
/// \param ITraits information about special handling for particular regions
/// or symbols.
[[nodiscard]] ProgramStateRef
- invalidateRegions(ArrayRef<const MemRegion *> Regions, const Stmt *S,
- unsigned BlockCount, const LocationContext *LCtx,
- bool CausesPointerEscape, InvalidatedSymbols *IS = nullptr,
+ invalidateRegions(ArrayRef<const MemRegion *> Regions,
+ ConstCFGElementRef Elem, unsigned BlockCount,
+ const LocationContext *LCtx, bool CausesPointerEscape,
+ InvalidatedSymbols *IS = nullptr,
const CallEvent *Call = nullptr,
RegionAndSymbolInvalidationTraits *ITraits = nullptr) const;
[[nodiscard]] ProgramStateRef
- invalidateRegions(ArrayRef<SVal> Values, const Stmt *S, unsigned BlockCount,
- const LocationContext *LCtx, bool CausesPointerEscape,
- InvalidatedSymbols *IS = nullptr,
+ invalidateRegions(ArrayRef<SVal> Values, ConstCFGElementRef Elem,
+ unsigned BlockCount, const LocationContext *LCtx,
+ bool CausesPointerEscape, InvalidatedSymbols *IS = nullptr,
const CallEvent *Call = nullptr,
RegionAndSymbolInvalidationTraits *ITraits = nullptr) const;
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 3f3e6bd..2911554 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -19,6 +19,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Type.h"
+#include "clang/Analysis/CFG.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
@@ -171,19 +172,11 @@ public:
// Forwarding methods to SymbolManager.
- const SymbolConjured* conjureSymbol(const Stmt *stmt,
+ const SymbolConjured *conjureSymbol(ConstCFGElementRef Elem,
const LocationContext *LCtx,
- QualType type,
- unsigned visitCount,
+ QualType type, unsigned visitCount,
const void *symbolTag = nullptr) {
- return SymMgr.conjureSymbol(stmt, LCtx, type, visitCount, symbolTag);
- }
-
- const SymbolConjured* conjureSymbol(const Expr *expr,
- const LocationContext *LCtx,
- unsigned visitCount,
- const void *symbolTag = nullptr) {
- return SymMgr.conjureSymbol(expr, LCtx, visitCount, symbolTag);
+ return SymMgr.conjureSymbol(Elem, LCtx, type, visitCount, symbolTag);
}
/// Construct an SVal representing '0' for the specified type.
@@ -199,16 +192,16 @@ public:
/// preserve the relation between related(or even equivalent) expressions, so
/// conjured symbols should be used sparingly.
DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag,
- const Expr *expr,
+ ConstCFGElementRef elem,
const LocationContext *LCtx,
unsigned count);
- DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag, const Stmt *S,
+ DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag,
+ ConstCFGElementRef elem,
const LocationContext *LCtx,
QualType type, unsigned count);
- DefinedOrUnknownSVal conjureSymbolVal(const Stmt *stmt,
+ DefinedOrUnknownSVal conjureSymbolVal(ConstCFGElementRef elem,
const LocationContext *LCtx,
- QualType type,
- unsigned visitCount);
+ QualType type, unsigned visitCount);
DefinedOrUnknownSVal conjureSymbolVal(const CallEvent &call, QualType type,
unsigned visitCount,
const void *symbolTag = nullptr);
@@ -217,17 +210,7 @@ public:
const void *symbolTag = nullptr);
/// Conjure a symbol representing heap allocated memory region.
- ///
- /// Note, the expression should represent a location.
- DefinedSVal getConjuredHeapSymbolVal(const Expr *E,
- const LocationContext *LCtx,
- unsigned Count);
-
- /// Conjure a symbol representing heap allocated memory region.
- ///
- /// Note, now, the expression *doesn't* need to represent a location.
- /// But the type need to!
- DefinedSVal getConjuredHeapSymbolVal(const Expr *E,
+ DefinedSVal getConjuredHeapSymbolVal(ConstCFGElementRef elem,
const LocationContext *LCtx,
QualType type, unsigned Count);
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index cf7623c..29a53fc 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -14,13 +14,13 @@
#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_STORE_H
#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
-#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -223,7 +223,7 @@ public:
///
/// \param[in] store The initial store.
/// \param[in] Values The values to invalidate.
- /// \param[in] S The current statement being evaluated. Used to conjure
+ /// \param[in] Elem The current CFG Element being evaluated. Used to conjure
/// symbols to mark the values of invalidated regions.
/// \param[in] Count The current block count. Used to conjure
/// symbols to mark the values of invalidated regions.
@@ -241,8 +241,8 @@ public:
/// even if they do not currently have bindings. Pass \c NULL if this
/// information will not be used.
virtual StoreRef invalidateRegions(
- Store store, ArrayRef<SVal> Values, const Stmt *S, unsigned Count,
- const LocationContext *LCtx, const CallEvent *Call,
+ Store store, ArrayRef<SVal> Values, ConstCFGElementRef Elem,
+ unsigned Count, const LocationContext *LCtx, const CallEvent *Call,
InvalidatedSymbols &IS, RegionAndSymbolInvalidationTraits &ITraits,
InvalidatedRegions *TopLevelRegions, InvalidatedRegions *Invalidated) = 0;
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index cbbea1b..9e7c98f 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -80,29 +80,62 @@ public:
/// A symbol representing the result of an expression in the case when we do
/// not know anything about what the expression is.
class SymbolConjured : public SymbolData {
- const Stmt *S;
+ ConstCFGElementRef Elem;
QualType T;
unsigned Count;
const LocationContext *LCtx;
const void *SymbolTag;
friend class SymExprAllocator;
- SymbolConjured(SymbolID sym, const Stmt *s, const LocationContext *lctx,
- QualType t, unsigned count, const void *symbolTag)
- : SymbolData(SymbolConjuredKind, sym), S(s), T(t), Count(count),
+ SymbolConjured(SymbolID sym, ConstCFGElementRef elem,
+ const LocationContext *lctx, QualType t, unsigned count,
+ const void *symbolTag)
+ : SymbolData(SymbolConjuredKind, sym), Elem(elem), T(t), Count(count),
LCtx(lctx), SymbolTag(symbolTag) {
- // FIXME: 's' might be a nullptr if we're conducting invalidation
- // that was caused by a destructor call on a temporary object,
- // which has no statement associated with it.
- // Due to this, we might be creating the same invalidation symbol for
- // two different invalidation passes (for two different temporaries).
assert(lctx);
assert(isValidTypeForSymbol(t));
}
public:
- /// It might return null.
- const Stmt *getStmt() const { return S; }
+ ConstCFGElementRef getCFGElementRef() const { return Elem; }
+
+ // It might return null.
+ const Stmt *getStmt() const {
+ switch (Elem->getKind()) {
+ case CFGElement::Initializer:
+ return Elem->castAs<CFGInitializer>().getInitializer()->getInit();
+ case CFGElement::ScopeBegin:
+ return Elem->castAs<CFGScopeBegin>().getTriggerStmt();
+ case CFGElement::ScopeEnd:
+ return Elem->castAs<CFGScopeEnd>().getTriggerStmt();
+ case CFGElement::NewAllocator:
+ return Elem->castAs<CFGNewAllocator>().getAllocatorExpr();
+ case CFGElement::LifetimeEnds:
+ return Elem->castAs<CFGLifetimeEnds>().getTriggerStmt();
+ case CFGElement::LoopExit:
+ return Elem->castAs<CFGLoopExit>().getLoopStmt();
+ case CFGElement::Statement:
+ return Elem->castAs<CFGStmt>().getStmt();
+ case CFGElement::Constructor:
+ return Elem->castAs<CFGConstructor>().getStmt();
+ case CFGElement::CXXRecordTypedCall:
+ return Elem->castAs<CFGCXXRecordTypedCall>().getStmt();
+ case CFGElement::AutomaticObjectDtor:
+ return Elem->castAs<CFGAutomaticObjDtor>().getTriggerStmt();
+ case CFGElement::DeleteDtor:
+ return Elem->castAs<CFGDeleteDtor>().getDeleteExpr();
+ case CFGElement::BaseDtor:
+ return nullptr;
+ case CFGElement::MemberDtor:
+ return nullptr;
+ case CFGElement::TemporaryDtor:
+ return Elem->castAs<CFGTemporaryDtor>().getBindTemporaryExpr();
+ case CFGElement::CleanupFunction:
+ return nullptr;
+ }
+ return nullptr;
+ }
+
unsigned getCount() const { return Count; }
/// It might return null.
const void *getTag() const { return SymbolTag; }
@@ -113,11 +146,11 @@ public:
void dumpToStream(raw_ostream &os) const override;
- static void Profile(llvm::FoldingSetNodeID &profile, const Stmt *S,
+ static void Profile(llvm::FoldingSetNodeID &profile, ConstCFGElementRef Elem,
const LocationContext *LCtx, QualType T, unsigned Count,
const void *SymbolTag) {
profile.AddInteger((unsigned)SymbolConjuredKind);
- profile.AddPointer(S);
+ profile.Add(Elem);
profile.AddPointer(LCtx);
profile.Add(T);
profile.AddInteger(Count);
@@ -125,7 +158,7 @@ public:
}
void Profile(llvm::FoldingSetNodeID& profile) override {
- Profile(profile, S, LCtx, T, Count, SymbolTag);
+ Profile(profile, Elem, LCtx, T, Count, SymbolTag);
}
// Implement isa<T> support.
@@ -533,18 +566,12 @@ public:
template <typename SymExprT, typename... Args>
const SymExprT *acquire(Args &&...args);
- const SymbolConjured *conjureSymbol(const Stmt *E,
+ const SymbolConjured *conjureSymbol(ConstCFGElementRef Elem,
const LocationContext *LCtx, QualType T,
unsigned VisitCount,
const void *SymbolTag = nullptr) {
- return acquire<SymbolConjured>(E, LCtx, T, VisitCount, SymbolTag);
- }
- const SymbolConjured* conjureSymbol(const Expr *E,
- const LocationContext *LCtx,
- unsigned VisitCount,
- const void *SymbolTag = nullptr) {
- return conjureSymbol(E, LCtx, E->getType(), VisitCount, SymbolTag);
+ return acquire<SymbolConjured>(Elem, LCtx, T, VisitCount, SymbolTag);
}
QualType getType(const SymExpr *SE) const {
diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt
index 26d4d04..b5cd14b 100644
--- a/clang/lib/AST/CMakeLists.txt
+++ b/clang/lib/AST/CMakeLists.txt
@@ -2,6 +2,7 @@ set(LLVM_LINK_COMPONENTS
BinaryFormat
Core
FrontendOpenMP
+ FrontendHLSL
Support
TargetParser
)
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 9cd1c71..061fedb 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -5848,6 +5848,38 @@ bool HLSLBufferDecl::buffer_decls_empty() {
}
//===----------------------------------------------------------------------===//
+// HLSLRootSignatureDecl Implementation
+//===----------------------------------------------------------------------===//
+
+HLSLRootSignatureDecl::HLSLRootSignatureDecl(DeclContext *DC,
+ SourceLocation Loc,
+ IdentifierInfo *ID,
+ unsigned NumElems)
+ : NamedDecl(Decl::Kind::HLSLRootSignature, DC, Loc, DeclarationName(ID)),
+ NumElems(NumElems) {}
+
+HLSLRootSignatureDecl *HLSLRootSignatureDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation Loc, IdentifierInfo *ID,
+ ArrayRef<llvm::hlsl::rootsig::RootElement> RootElements) {
+ HLSLRootSignatureDecl *RSDecl =
+ new (C, DC,
+ additionalSizeToAlloc<llvm::hlsl::rootsig::RootElement>(
+ RootElements.size()))
+ HLSLRootSignatureDecl(DC, Loc, ID, RootElements.size());
+ auto *StoredElems = RSDecl->getElems();
+ std::uninitialized_copy(RootElements.begin(), RootElements.end(),
+ StoredElems);
+ return RSDecl;
+}
+
+HLSLRootSignatureDecl *
+HLSLRootSignatureDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
+ HLSLRootSignatureDecl *Result = new (C, ID)
+ HLSLRootSignatureDecl(nullptr, SourceLocation(), nullptr, /*NumElems=*/0);
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
// ImportDecl Implementation
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 2052c0c..e30057e 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -886,6 +886,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ObjCProperty:
case MSProperty:
case HLSLBuffer:
+ case HLSLRootSignature:
return IDNS_Ordinary;
case Label:
return IDNS_Label;
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index d058831..6857eef 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -382,12 +382,11 @@ template <class EntryType, typename... ProfileArguments>
typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType *
RedeclarableTemplateDecl::findSpecializationLocally(
llvm::FoldingSetVector<EntryType> &Specs, void *&InsertPos,
- ProfileArguments &&...ProfileArgs) {
+ ProfileArguments... ProfileArgs) {
using SETraits = RedeclarableTemplateDecl::SpecEntryTraits<EntryType>;
llvm::FoldingSetNodeID ID;
- EntryType::Profile(ID, std::forward<ProfileArguments>(ProfileArgs)...,
- getASTContext());
+ EntryType::Profile(ID, ProfileArgs..., getASTContext());
EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos);
return Entry ? SETraits::getDecl(Entry)->getMostRecentDecl() : nullptr;
}
@@ -396,18 +395,15 @@ template <class EntryType, typename... ProfileArguments>
typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType *
RedeclarableTemplateDecl::findSpecializationImpl(
llvm::FoldingSetVector<EntryType> &Specs, void *&InsertPos,
- ProfileArguments &&...ProfileArgs) {
+ ProfileArguments... ProfileArgs) {
- if (auto *Found = findSpecializationLocally(
- Specs, InsertPos, std::forward<ProfileArguments>(ProfileArgs)...))
+ if (auto *Found = findSpecializationLocally(Specs, InsertPos, ProfileArgs...))
return Found;
- if (!loadLazySpecializationsImpl(
- std::forward<ProfileArguments>(ProfileArgs)...))
+ if (!loadLazySpecializationsImpl(ProfileArgs...))
return nullptr;
- return findSpecializationLocally(
- Specs, InsertPos, std::forward<ProfileArguments>(ProfileArgs)...);
+ return findSpecializationLocally(Specs, InsertPos, ProfileArgs...);
}
template<class Derived, class EntryType>
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 3af6276..112e902 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -24,6 +24,7 @@
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Frontend/HLSL/HLSLRootSignature.h"
#include <algorithm>
#include <utility>
@@ -3037,6 +3038,12 @@ void TextNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) {
dumpName(D);
}
+void TextNodeDumper::VisitHLSLRootSignatureDecl(
+ const HLSLRootSignatureDecl *D) {
+ dumpName(D);
+ llvm::hlsl::rootsig::dumpRootElements(OS, D->getRootElements());
+}
+
void TextNodeDumper::VisitHLSLOutArgExpr(const HLSLOutArgExpr *E) {
OS << (E->isInOut() ? " inout" : " out");
}
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index f58d31ba..23876d5 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -5841,16 +5841,17 @@ static void print_construction_context(raw_ostream &OS,
}
static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
- const CFGElement &E);
+ const CFGElement &E, bool TerminateWithNewLine = true);
-void CFGElement::dumpToStream(llvm::raw_ostream &OS) const {
+void CFGElement::dumpToStream(llvm::raw_ostream &OS,
+ bool TerminateWithNewLine) const {
LangOptions LangOpts;
StmtPrinterHelper Helper(nullptr, LangOpts);
- print_elem(OS, Helper, *this);
+ print_elem(OS, Helper, *this, TerminateWithNewLine);
}
static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
- const CFGElement &E) {
+ const CFGElement &E, bool TerminateWithNewLine) {
switch (E.getKind()) {
case CFGElement::Kind::Statement:
case CFGElement::Kind::CXXRecordTypedCall:
@@ -5867,7 +5868,9 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
if (Children.begin() != Children.end()) {
OS << "({ ... ; ";
Helper.handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
- OS << " })\n";
+ OS << " })";
+ if (TerminateWithNewLine)
+ OS << '\n';
return;
}
}
@@ -5876,7 +5879,8 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
if (B->getOpcode() == BO_Comma) {
OS << "... , ";
Helper.handledStmt(B->getRHS(),OS);
- OS << '\n';
+ if (TerminateWithNewLine)
+ OS << '\n';
return;
}
}
@@ -5904,15 +5908,14 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
}
// Expressions need a newline.
- if (isa<Expr>(S))
+ if (isa<Expr>(S) && TerminateWithNewLine)
OS << '\n';
- break;
+ return;
}
case CFGElement::Kind::Initializer:
print_initializer(OS, Helper, E.castAs<CFGInitializer>().getInitializer());
- OS << '\n';
break;
case CFGElement::Kind::AutomaticObjectDtor: {
@@ -5926,43 +5929,44 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
OS << ".~";
T.getUnqualifiedType().print(OS, PrintingPolicy(Helper.getLangOpts()));
- OS << "() (Implicit destructor)\n";
+ OS << "() (Implicit destructor)";
break;
}
case CFGElement::Kind::CleanupFunction:
OS << "CleanupFunction ("
- << E.castAs<CFGCleanupFunction>().getFunctionDecl()->getName() << ")\n";
+ << E.castAs<CFGCleanupFunction>().getFunctionDecl()->getName() << ")";
break;
case CFGElement::Kind::LifetimeEnds:
Helper.handleDecl(E.castAs<CFGLifetimeEnds>().getVarDecl(), OS);
- OS << " (Lifetime ends)\n";
+ OS << " (Lifetime ends)";
break;
case CFGElement::Kind::LoopExit:
- OS << E.castAs<CFGLoopExit>().getLoopStmt()->getStmtClassName() << " (LoopExit)\n";
+ OS << E.castAs<CFGLoopExit>().getLoopStmt()->getStmtClassName()
+ << " (LoopExit)";
break;
case CFGElement::Kind::ScopeBegin:
OS << "CFGScopeBegin(";
if (const VarDecl *VD = E.castAs<CFGScopeBegin>().getVarDecl())
OS << VD->getQualifiedNameAsString();
- OS << ")\n";
+ OS << ")";
break;
case CFGElement::Kind::ScopeEnd:
OS << "CFGScopeEnd(";
if (const VarDecl *VD = E.castAs<CFGScopeEnd>().getVarDecl())
OS << VD->getQualifiedNameAsString();
- OS << ")\n";
+ OS << ")";
break;
case CFGElement::Kind::NewAllocator:
OS << "CFGNewAllocator(";
if (const CXXNewExpr *AllocExpr = E.castAs<CFGNewAllocator>().getAllocatorExpr())
AllocExpr->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
- OS << ")\n";
+ OS << ")";
break;
case CFGElement::Kind::DeleteDtor: {
@@ -5974,14 +5978,14 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
const_cast<CXXDeleteExpr*>(DE.getDeleteExpr());
Helper.handledStmt(cast<Stmt>(DelExpr->getArgument()), OS);
OS << "->~" << RD->getName().str() << "()";
- OS << " (Implicit destructor)\n";
+ OS << " (Implicit destructor)";
break;
}
case CFGElement::Kind::BaseDtor: {
const CXXBaseSpecifier *BS = E.castAs<CFGBaseDtor>().getBaseSpecifier();
OS << "~" << BS->getType()->getAsCXXRecordDecl()->getName() << "()";
- OS << " (Base object destructor)\n";
+ OS << " (Base object destructor)";
break;
}
@@ -5990,7 +5994,7 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
const Type *T = FD->getType()->getBaseElementTypeUnsafe();
OS << "this->" << FD->getName();
OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
- OS << " (Member object destructor)\n";
+ OS << " (Member object destructor)";
break;
}
@@ -5999,10 +6003,12 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
E.castAs<CFGTemporaryDtor>().getBindTemporaryExpr();
OS << "~";
BT->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
- OS << "() (Temporary object destructor)\n";
+ OS << "() (Temporary object destructor)";
break;
}
}
+ if (TerminateWithNewLine)
+ OS << '\n';
}
static void print_block(raw_ostream &OS, const CFG* cfg,
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 711a652..0386961 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -205,6 +205,17 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
bool isInit) {
if (!dst.isSimple()) {
+ if (dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element
+ const mlir::Location loc = dst.getVectorPointer().getLoc();
+ const mlir::Value vector =
+ builder.createLoad(loc, dst.getVectorAddress().getPointer());
+ const mlir::Value newVector = builder.create<cir::VecInsertOp>(
+ loc, vector, src.getScalarVal(), dst.getVectorIdx());
+ builder.createStore(loc, newVector, dst.getVectorAddress().getPointer());
+ return;
+ }
+
cgm.errorNYI(dst.getPointer().getLoc(),
"emitStoreThroughLValue: non-simple lvalue");
return;
@@ -418,6 +429,13 @@ RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
if (lv.isSimple())
return RValue::get(emitLoadOfScalar(lv, loc));
+ if (lv.isVectorElt()) {
+ const mlir::Value load =
+ builder.createLoad(getLoc(loc), lv.getVectorAddress().getPointer());
+ return RValue::get(builder.create<cir::VecExtractOp>(getLoc(loc), load,
+ lv.getVectorIdx()));
+ }
+
cgm.errorNYI(loc, "emitLoadOfLValue");
return RValue::get(nullptr);
}
@@ -638,12 +656,6 @@ static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
LValue
CIRGenFunction::emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e) {
- if (e->getBase()->getType()->isVectorType() &&
- !isa<ExtVectorElementExpr>(e->getBase())) {
- cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: VectorType");
- return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
- }
-
if (isa<ExtVectorElementExpr>(e->getBase())) {
cgm.errorNYI(e->getSourceRange(),
"emitArraySubscriptExpr: ExtVectorElementExpr");
@@ -666,18 +678,28 @@ CIRGenFunction::emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e) {
assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
"index was neither LHS nor RHS");
- auto emitIdxAfterBase = [&]() -> mlir::Value {
+ auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
const mlir::Value idx = emitScalarExpr(e->getIdx());
// Extend or truncate the index type to 32 or 64-bits.
auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
- if (ptrTy && mlir::isa<cir::IntType>(ptrTy.getPointee()))
+ if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
cgm.errorNYI(e->getSourceRange(),
"emitArraySubscriptExpr: index type cast");
return idx;
};
- const mlir::Value idx = emitIdxAfterBase();
+ // If the base is a vector type, then we are forming a vector element
+ // with this subscript.
+ if (e->getBase()->getType()->isVectorType() &&
+ !isa<ExtVectorElementExpr>(e->getBase())) {
+ const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
+ const LValue lhs = emitLValue(e->getBase());
+ return LValue::makeVectorElt(lhs.getAddress(), idx, e->getBase()->getType(),
+ lhs.getBaseInfo());
+ }
+
+ const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
LValue arrayLV;
if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 82bd139..6899e49 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -787,7 +787,7 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
case Decl::OpenACCDeclare:
emitGlobalOpenACCDecl(cast<OpenACCDeclareDecl>(decl));
break;
-
+ case Decl::Enum:
case Decl::UsingDirective: // using namespace X; [C++]
case Decl::Typedef:
case Decl::TypeAlias: // using foo = bar; [C++11]
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 313a6a0..eecd29c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -420,6 +420,19 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
break;
}
+ case Type::Enum: {
+ // TODO(cir): Implement updateCompletedType for enums.
+ assert(!cir::MissingFeatures::updateCompletedType());
+ const EnumDecl *ED = cast<EnumType>(ty)->getDecl();
+ if (auto integerType = ED->getIntegerType(); !integerType.isNull())
+ return convertType(integerType);
+ // Return a placeholder 'i32' type. This can be changed later when the
+ // type is defined (see UpdateCompletedType), but is likely to be the
+ // "right" answer.
+ resultType = cgm.UInt32Ty;
+ break;
+ }
+
case Type::FunctionNoProto:
case Type::FunctionProto:
resultType = convertFunctionTypeInternal(type);
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index ce87496..3feadfaf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -116,6 +116,7 @@ class LValue {
// this is the alignment of the whole vector)
unsigned alignment;
mlir::Value v;
+ mlir::Value vectorIdx; // Index for vector subscript
mlir::Type elementType;
LValueBaseInfo baseInfo;
@@ -136,6 +137,7 @@ class LValue {
public:
bool isSimple() const { return lvType == Simple; }
+ bool isVectorElt() const { return lvType == VectorElt; }
bool isBitField() const { return lvType == BitField; }
// TODO: Add support for volatile
@@ -176,6 +178,31 @@ public:
r.initialize(t, t.getQualifiers(), address.getAlignment(), baseInfo);
return r;
}
+
+ Address getVectorAddress() const {
+ return Address(getVectorPointer(), elementType, getAlignment());
+ }
+
+ mlir::Value getVectorPointer() const {
+ assert(isVectorElt());
+ return v;
+ }
+
+ mlir::Value getVectorIdx() const {
+ assert(isVectorElt());
+ return vectorIdx;
+ }
+
+ static LValue makeVectorElt(Address vecAddress, mlir::Value index,
+ clang::QualType t, LValueBaseInfo baseInfo) {
+ LValue r;
+ r.lvType = VectorElt;
+ r.v = vecAddress.getPointer();
+ r.elementType = vecAddress.getElementType();
+ r.vectorIdx = index;
+ r.initialize(t, t.getQualifiers(), vecAddress.getAlignment(), baseInfo);
+ return r;
+ }
};
/// An aggregate value slot.
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 5986655..9c46bd3 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1646,7 +1646,8 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMTrapOpLowering,
CIRToLLVMUnaryOpLowering,
CIRToLLVMVecCreateOpLowering,
- CIRToLLVMVecExtractOpLowering
+ CIRToLLVMVecExtractOpLowering,
+ CIRToLLVMVecInsertOpLowering
// clang-format on
>(converter, patterns.getContext());
@@ -1763,6 +1764,14 @@ mlir::LogicalResult CIRToLLVMVecExtractOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMVecInsertOpLowering::matchAndRewrite(
+ cir::VecInsertOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InsertElementOp>(
+ op, adaptor.getVec(), adaptor.getValue(), adaptor.getIndex());
+ return mlir::success();
+}
+
std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass() {
return std::make_unique<ConvertCIRToLLVMPass>();
}
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 0ac1b6d..bd077e3 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -322,6 +322,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMVecInsertOpLowering
+ : public mlir::OpConversionPattern<cir::VecInsertOp> {
+public:
+ using mlir::OpConversionPattern<cir::VecInsertOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VecInsertOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
} // namespace direct
} // namespace cir
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index ba0d87f..40627d6 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -40,8 +40,7 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
- if (!name.empty() && name[0] == '\01')
- name = name.substr(1);
+ name.consume_front("\01");
}
// Anchor the vtable to this translation unit.
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 3513175..2a11eeb 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -4500,8 +4500,7 @@ void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
Flags |= llvm::DINode::FlagPrototyped;
}
- if (Name.starts_with("\01"))
- Name = Name.substr(1);
+ Name.consume_front("\01");
assert((!D || !isa<VarDecl>(D) ||
GD.getDynamicInitKind() != DynamicInitKind::NoStub) &&
@@ -4590,8 +4589,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
} else {
llvm_unreachable("not a function or ObjC method");
}
- if (!Name.empty() && Name[0] == '\01')
- Name = Name.substr(1);
+ Name.consume_front("\01");
if (D->isImplicit()) {
Flags |= llvm::DINode::FlagArtificial;
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index fe8c3cb..4a8f7f6 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -106,6 +106,7 @@ void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
case Decl::Binding:
case Decl::UnresolvedUsingIfExists:
case Decl::HLSLBuffer:
+ case Decl::HLSLRootSignature:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Record: // struct/union/class X;
case Decl::CXXRecord: // struct/union/class X; [C++]
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 49c2bef..0d03923 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -3388,8 +3388,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
auto SL = E->getFunctionName();
assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
StringRef FnName = CurFn->getName();
- if (FnName.starts_with("\01"))
- FnName = FnName.substr(1);
+ FnName.consume_front("\01");
StringRef NameItems[] = {
PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 3469676..428a4b8 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -4061,11 +4061,7 @@ namespace {
return false;
std::string BuiltinNameStr = BI.getName(BuiltinID);
StringRef BuiltinName = BuiltinNameStr;
- if (BuiltinName.starts_with("__builtin_") &&
- Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
- return true;
- }
- return false;
+ return BuiltinName.consume_front("__builtin_") && Name == BuiltinName;
}
bool VisitStmt(const Stmt *S) {
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index e844f0d..192f979 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -5612,19 +5612,20 @@ class ToolSelector final {
if (!BJ || !CJ)
return nullptr;
+ auto HasBitcodeInput = [](const JobActionInfo &AI) {
+ for (auto &Input : AI.JA->getInputs())
+ if (!types::isLLVMIR(Input->getType()))
+ return false;
+ return true;
+ };
+
// Check if the initial input (to the compile job or its predessor if one
// exists) is LLVM bitcode. In that case, no preprocessor step is required
// and we can still collapse the compile and backend jobs when we have
// -save-temps. I.e. there is no need for a separate compile job just to
// emit unoptimized bitcode.
- bool InputIsBitcode = true;
- for (size_t i = 1; i < ActionInfo.size(); i++)
- if (ActionInfo[i].JA->getType() != types::TY_LLVM_BC &&
- ActionInfo[i].JA->getType() != types::TY_LTO_BC) {
- InputIsBitcode = false;
- break;
- }
- if (!InputIsBitcode && !canCollapsePreprocessorAction())
+ bool InputIsBitcode = all_of(ActionInfo, HasBitcodeInput);
+ if (SaveTemps && !InputIsBitcode)
return nullptr;
// Get compiler tool.
@@ -5638,7 +5639,7 @@ class ToolSelector final {
if (!T->hasIntegratedBackend() && !(OutputIsLLVM && T->canEmitIR()))
return nullptr;
- if (T->canEmitIR() && ((SaveTemps && !InputIsBitcode) || EmbedBitcode))
+ if (T->canEmitIR() && EmbedBitcode)
return nullptr;
Inputs = CJ->getInputs();
diff --git a/clang/lib/Driver/Job.cpp b/clang/lib/Driver/Job.cpp
index f676b12..880e9e3 100644
--- a/clang/lib/Driver/Job.cpp
+++ b/clang/lib/Driver/Job.cpp
@@ -184,7 +184,7 @@ rewriteIncludes(const llvm::ArrayRef<const char *> &Args, size_t Idx,
StringRef FlagRef(Args[Idx + NumArgs - 1]);
assert((FlagRef.starts_with("-F") || FlagRef.starts_with("-I")) &&
"Expecting -I or -F");
- StringRef Inc = FlagRef.slice(2, StringRef::npos);
+ StringRef Inc = FlagRef.substr(2);
if (getAbsPath(Inc, NewInc)) {
SmallString<128> NewArg(FlagRef.slice(0, 2));
NewArg += NewInc;
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 3c52abb..664aafa 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -1441,7 +1441,7 @@ std::string ToolChain::detectLibcxxVersion(StringRef IncludePath) const {
StringRef VersionText = llvm::sys::path::filename(LI->path());
int Version;
if (VersionText[0] == 'v' &&
- !VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
+ !VersionText.substr(1).getAsInteger(10, Version)) {
if (Version > MaxVersion) {
MaxVersion = Version;
MaxVersionString = std::string(VersionText);
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index a08ff04..a08bdba 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -521,7 +521,7 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
CmdArgs.push_back("-x");
if (Args.hasArg(options::OPT_rewrite_objc))
- CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
+ CmdArgs.push_back(types::getTypeName(types::TY_ObjCXX));
else {
// Map the driver type to the frontend type. This is mostly an identity
// mapping, except that the distinction between module interface units
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index fe3952e..26e24ad0 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -1411,8 +1411,8 @@ StringRef Darwin::getSDKName(StringRef isysroot) {
auto EndSDK = llvm::sys::path::rend(isysroot);
for (auto IT = BeginSDK; IT != EndSDK; ++IT) {
StringRef SDK = *IT;
- if (SDK.ends_with(".sdk"))
- return SDK.slice(0, SDK.size() - 4);
+ if (SDK.consume_back(".sdk"))
+ return SDK;
}
return "";
}
diff --git a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
index 6f42b36..764c345 100644
--- a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
+++ b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
@@ -305,8 +305,7 @@ public:
auto DefLoc = MI->getDefinitionLoc();
- if (SM.isWrittenInBuiltinFile(DefLoc) ||
- SM.isWrittenInCommandLineFile(DefLoc))
+ if (SM.isInPredefinedFile(DefLoc))
continue;
auto AssociatedModuleMacros = MD.getModuleMacros();
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index f09eb98..54a2e3e 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -100,11 +100,7 @@ public:
unsigned Column;
bool operator<(const Position &other) const {
- if (Line < other.Line)
- return true;
- if (Line > other.Line)
- return false;
- return Column < other.Column;
+ return std::tie(Line, Column) < std::tie(other.Line, other.Column);
}
static Position GetBeginSpelling(const SourceManager &SM,
diff --git a/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 2ae355f..22ba4ce 100644
--- a/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -569,8 +569,7 @@ void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
SourceLocation DefLoc = MI->getDefinitionLoc();
if (DirectivesOnly && !MI->isUsed()) {
SourceManager &SM = PP.getSourceManager();
- if (SM.isWrittenInBuiltinFile(DefLoc) ||
- SM.isWrittenInCommandLineFile(DefLoc))
+ if (SM.isInPredefinedFile(DefLoc))
return;
}
MoveToLine(DefLoc, /*RequireStartOfLine=*/true);
diff --git a/clang/lib/Headers/cuda_wrappers/cmath b/clang/lib/Headers/cuda_wrappers/cmath
index 7deca67..8e9ee34 100644
--- a/clang/lib/Headers/cuda_wrappers/cmath
+++ b/clang/lib/Headers/cuda_wrappers/cmath
@@ -39,56 +39,6 @@
__attribute__((device)) long double logb(long double);
__attribute__((device)) long double scalbn(long double, int);
-namespace std {
-
-// For __constexpr_fmin/fmax we only need device-side overloads before c++14
-// where they are not constexpr.
-#if _LIBCPP_STD_VER < 14
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 float __constexpr_fmax(float __x, float __y) _NOEXCEPT {
- return __builtin_fmaxf(__x, __y);
-}
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 double __constexpr_fmax(double __x, double __y) _NOEXCEPT {
- return __builtin_fmax(__x, __y);
-}
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 long double
-__constexpr_fmax(long double __x, long double __y) _NOEXCEPT {
- return __builtin_fmaxl(__x, __y);
-}
-
-template <class _Tp, class _Up, __enable_if_t<is_arithmetic<_Tp>::value && is_arithmetic<_Up>::value, int> = 0>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename __promote<_Tp, _Up>::type
-__constexpr_fmax(_Tp __x, _Up __y) _NOEXCEPT {
- using __result_type = typename __promote<_Tp, _Up>::type;
- return std::__constexpr_fmax(static_cast<__result_type>(__x), static_cast<__result_type>(__y));
-}
-#endif // _LIBCPP_STD_VER < 14
-
-// For logb/scalbn templates we must always provide device overloads because
-// libc++ implementation uses __builtin_XXX which gets translated into a libcall
-// which we can't handle on GPU. We need to forward those to CUDA-provided
-// implementations.
-
-template <class _Tp>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __constexpr_logb(_Tp __x) {
- return ::logb(__x);
-}
-
-template <class _Tp>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Tp __constexpr_scalbn(_Tp __x, int __exp) {
- return ::scalbn(__x, __exp);
-}
-
-} // namespace std//
-
#endif // _LIBCPP_STD_VER
#endif // include guard
diff --git a/clang/lib/Lex/PPDirectives.cpp b/clang/lib/Lex/PPDirectives.cpp
index 384d167..c2bab91 100644
--- a/clang/lib/Lex/PPDirectives.cpp
+++ b/clang/lib/Lex/PPDirectives.cpp
@@ -374,9 +374,8 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
// Macro names with reserved identifiers are accepted if built-in or passed
// through the command line (the later may be present if -dD was used to
// generate the preprocessed file).
- bool IsBuiltinOrCmd = SourceMgr.isWrittenInBuiltinFile(MacroNameLoc) ||
- SourceMgr.isWrittenInCommandLineFile(MacroNameLoc);
- if (!IsBuiltinOrCmd && !SourceMgr.isInSystemHeader(MacroNameLoc)) {
+ if (!SourceMgr.isInPredefinedFile(MacroNameLoc) &&
+ !SourceMgr.isInSystemHeader(MacroNameLoc)) {
MacroDiag D = MD_NoWarn;
if (isDefineUndef == MU_Define) {
D = shouldWarnOnMacroDef(*this, II);
@@ -1706,8 +1705,7 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
// If a filename was present, read any flags that are present.
if (ReadLineMarkerFlags(IsFileEntry, IsFileExit, FileKind, *this))
return;
- if (!SourceMgr.isWrittenInBuiltinFile(DigitTok.getLocation()) &&
- !SourceMgr.isWrittenInCommandLineFile(DigitTok.getLocation()))
+ if (!SourceMgr.isInPredefinedFile(DigitTok.getLocation()))
Diag(StrTok, diag::ext_pp_gnu_line_directive);
// Exiting to an empty string means pop to the including file, so leave
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 2aa7a5b..f121633 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -21,10 +21,12 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/LiteralSupport.h"
+#include "clang/Parse/ParseHLSLRootSignature.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
+#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaCodeCompletion.h"
@@ -5311,6 +5313,90 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
}
}
+void Parser::ParseMicrosoftRootSignatureAttributeArgs(ParsedAttributes &Attrs) {
+ assert(Tok.is(tok::identifier) &&
+ "Expected an identifier to denote which MS attribute to consider");
+ IdentifierInfo *RootSignatureIdent = Tok.getIdentifierInfo();
+ assert(RootSignatureIdent->getName() == "RootSignature" &&
+ "Expected RootSignature identifier for root signature attribute");
+
+ SourceLocation RootSignatureLoc = Tok.getLocation();
+ ConsumeToken();
+
+ // Ignore the left paren location for now.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ auto ProcessStringLiteral = [this]() -> std::optional<StringLiteral *> {
+ if (!isTokenStringLiteral())
+ return std::nullopt;
+
+ ExprResult StringResult = ParseUnevaluatedStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return std::nullopt;
+
+ if (auto Lit = dyn_cast<StringLiteral>(StringResult.get()))
+ return Lit;
+
+ return std::nullopt;
+ };
+
+ auto StrLiteral = ProcessStringLiteral();
+ if (!StrLiteral.has_value()) {
+ Diag(Tok, diag::err_expected_string_literal)
+ << /*in attributes...*/ 4 << RootSignatureIdent->getName();
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ T.consumeClose();
+ return;
+ }
+
+ // Construct our identifier
+ StringRef Signature = StrLiteral.value()->getString();
+ auto Hash = llvm::hash_value(Signature);
+ std::string IdStr = "__hlsl_rootsig_decl_" + std::to_string(Hash);
+ IdentifierInfo *DeclIdent = &(Actions.getASTContext().Idents.get(IdStr));
+
+ LookupResult R(Actions, DeclIdent, SourceLocation(),
+ Sema::LookupOrdinaryName);
+ // Check if we have already found a decl of the same name, if we haven't
+ // then parse the root signature string and construct the in-memory elements
+ if (!Actions.LookupQualifiedName(R, Actions.CurContext)) {
+ SourceLocation SignatureLoc =
+ StrLiteral.value()->getExprLoc().getLocWithOffset(
+ 1); // offset 1 for '"'
+ // Invoke the root signature parser to construct the in-memory constructs
+ hlsl::RootSignatureLexer Lexer(Signature, SignatureLoc);
+ SmallVector<llvm::hlsl::rootsig::RootElement> RootElements;
+ hlsl::RootSignatureParser Parser(RootElements, Lexer, PP);
+ if (Parser.parse()) {
+ T.consumeClose();
+ return;
+ }
+
+ // Create the Root Signature
+ auto *SignatureDecl = HLSLRootSignatureDecl::Create(
+ Actions.getASTContext(), /*DeclContext=*/Actions.CurContext,
+ RootSignatureLoc, DeclIdent, RootElements);
+ SignatureDecl->setImplicit();
+ Actions.PushOnScopeChains(SignatureDecl, getCurScope());
+ }
+
+ // Create the arg for the ParsedAttr
+ IdentifierLoc *ILoc = ::new (Actions.getASTContext())
+ IdentifierLoc(RootSignatureLoc, DeclIdent);
+
+ ArgsVector Args = {ILoc};
+
+ if (!T.consumeClose())
+ Attrs.addNew(RootSignatureIdent,
+ SourceRange(RootSignatureLoc, T.getCloseLocation()), nullptr,
+ SourceLocation(), Args.data(), Args.size(),
+ ParsedAttr::Form::Microsoft());
+}
+
/// ParseMicrosoftAttributes - Parse Microsoft attributes [Attr]
///
/// [MS] ms-attribute:
@@ -5345,6 +5431,8 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &Attrs) {
break;
if (Tok.getIdentifierInfo()->getName() == "uuid")
ParseMicrosoftUuidAttributeArgs(Attrs);
+ else if (Tok.getIdentifierInfo()->getName() == "RootSignature")
+ ParseMicrosoftRootSignatureAttributeArgs(Attrs);
else {
IdentifierInfo *II = Tok.getIdentifierInfo();
SourceLocation NameLoc = Tok.getLocation();
diff --git a/clang/lib/Parse/ParseOpenACC.cpp b/clang/lib/Parse/ParseOpenACC.cpp
index 3843e1a..e1da86a 100644
--- a/clang/lib/Parse/ParseOpenACC.cpp
+++ b/clang/lib/Parse/ParseOpenACC.cpp
@@ -1429,6 +1429,11 @@ Parser::ParseOpenACCBindClauseArgument() {
return II;
}
+ if (!tok::isStringLiteral(getCurToken().getKind())) {
+ Diag(getCurToken(), diag::err_acc_incorrect_bind_arg);
+ return std::monostate{};
+ }
+
ExprResult Res =
getActions().CorrectDelayedTyposInExpr(ParseStringLiteralExpression(
/*AllowUserDefinedLiteral=*/false, /*Unevaluated=*/true));
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index 2508bf5..85838fe 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -668,6 +668,72 @@ TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclaratorInfo);
}
+/// Parses 'omp begin declare variant' directive.
+// The syntax is:
+// { #pragma omp begin declare variant clause }
+// <function-declaration-or-definition-sequence>
+// { #pragma omp end declare variant }
+//
+bool Parser::ParseOpenMPDeclareBeginVariantDirective(SourceLocation Loc) {
+ OMPTraitInfo *ParentTI =
+ Actions.OpenMP().getOMPTraitInfoForSurroundingScope();
+ ASTContext &ASTCtx = Actions.getASTContext();
+ OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI)) {
+ while (!SkipUntil(tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
+ ;
+ // Skip the last annot_pragma_openmp_end.
+ (void)ConsumeAnnotationToken();
+ return true;
+ }
+
+ // Skip last tokens.
+ skipUntilPragmaOpenMPEnd(OMPD_begin_declare_variant);
+
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
+
+ VariantMatchInfo VMI;
+ TI.getAsVariantMatchInfo(ASTCtx, VMI);
+
+ std::function<void(StringRef)> DiagUnknownTrait = [this,
+ Loc](StringRef ISATrait) {
+ // TODO Track the selector locations in a way that is accessible here
+ // to improve the diagnostic location.
+ Diag(Loc, diag::warn_unknown_declare_variant_isa_trait) << ISATrait;
+ };
+ TargetOMPContext OMPCtx(
+ ASTCtx, std::move(DiagUnknownTrait),
+ /* CurrentFunctionDecl */ nullptr,
+ /* ConstructTraits */ ArrayRef<llvm::omp::TraitProperty>(),
+ Actions.OpenMP().getOpenMPDeviceNum());
+
+ if (isVariantApplicableInContext(VMI, OMPCtx,
+ /*DeviceOrImplementationSetOnly=*/true)) {
+ Actions.OpenMP().ActOnOpenMPBeginDeclareVariant(Loc, TI);
+ return false;
+ }
+
+ // Elide all the code till the matching end declare variant was found.
+ unsigned Nesting = 1;
+ SourceLocation DKLoc;
+ OpenMPDirectiveKind DK = OMPD_unknown;
+ do {
+ DKLoc = Tok.getLocation();
+ DK = parseOpenMPDirectiveKind(*this);
+ if (DK == OMPD_end_declare_variant)
+ --Nesting;
+ else if (DK == OMPD_begin_declare_variant)
+ ++Nesting;
+ if (!Nesting || isEofOrEom())
+ break;
+ ConsumeAnyToken();
+ } while (true);
+
+ parseOMPEndDirective(OMPD_begin_declare_variant, OMPD_end_declare_variant, DK,
+ Loc, DKLoc, /* SkipUntilOpenMPEnd */ true);
+ return false;
+}
+
namespace {
/// RAII that recreates function context for correct parsing of clauses of
/// 'declare simd' construct.
@@ -2244,79 +2310,23 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
break;
}
case OMPD_begin_declare_variant: {
- // The syntax is:
- // { #pragma omp begin declare variant clause }
- // <function-declaration-or-definition-sequence>
- // { #pragma omp end declare variant }
- //
ConsumeToken();
- OMPTraitInfo *ParentTI =
- Actions.OpenMP().getOMPTraitInfoForSurroundingScope();
- ASTContext &ASTCtx = Actions.getASTContext();
- OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
- if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI)) {
- while (!SkipUntil(tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
+ if (!ParseOpenMPDeclareBeginVariantDirective(Loc)) {
// Skip the last annot_pragma_openmp_end.
- (void)ConsumeAnnotationToken();
- break;
+ if (!isEofOrEom())
+ ConsumeAnnotationToken();
}
-
- // Skip last tokens.
- skipUntilPragmaOpenMPEnd(OMPD_begin_declare_variant);
-
- ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
-
- VariantMatchInfo VMI;
- TI.getAsVariantMatchInfo(ASTCtx, VMI);
-
- std::function<void(StringRef)> DiagUnknownTrait =
- [this, Loc](StringRef ISATrait) {
- // TODO Track the selector locations in a way that is accessible here
- // to improve the diagnostic location.
- Diag(Loc, diag::warn_unknown_declare_variant_isa_trait) << ISATrait;
- };
- TargetOMPContext OMPCtx(
- ASTCtx, std::move(DiagUnknownTrait),
- /* CurrentFunctionDecl */ nullptr,
- /* ConstructTraits */ ArrayRef<llvm::omp::TraitProperty>(),
- Actions.OpenMP().getOpenMPDeviceNum());
-
- if (isVariantApplicableInContext(VMI, OMPCtx,
- /*DeviceOrImplementationSetOnly=*/true)) {
- Actions.OpenMP().ActOnOpenMPBeginDeclareVariant(Loc, TI);
- break;
- }
-
- // Elide all the code till the matching end declare variant was found.
- unsigned Nesting = 1;
- SourceLocation DKLoc;
- OpenMPDirectiveKind DK = OMPD_unknown;
- do {
- DKLoc = Tok.getLocation();
- DK = parseOpenMPDirectiveKind(*this);
- if (DK == OMPD_end_declare_variant)
- --Nesting;
- else if (DK == OMPD_begin_declare_variant)
- ++Nesting;
- if (!Nesting || isEofOrEom())
- break;
- ConsumeAnyToken();
- } while (true);
-
- parseOMPEndDirective(OMPD_begin_declare_variant, OMPD_end_declare_variant,
- DK, Loc, DKLoc, /* SkipUntilOpenMPEnd */ true);
- if (isEofOrEom())
- return nullptr;
- break;
+ return nullptr;
}
case OMPD_end_declare_variant: {
+ ConsumeToken();
if (Actions.OpenMP().isInOpenMPDeclareVariantScope())
Actions.OpenMP().ActOnOpenMPEndDeclareVariant();
else
Diag(Loc, diag::err_expected_begin_declare_variant);
- ConsumeToken();
- break;
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnnotationToken();
+ return nullptr;
}
case OMPD_declare_variant:
case OMPD_declare_simd: {
@@ -2487,7 +2497,7 @@ StmtResult Parser::ParseOpenMPExecutableDirective(
} else if (DKind == OMPD_cancellation_point || DKind == OMPD_cancel) {
CancelRegion = parseOpenMPDirectiveKind(*this);
if (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeToken();
+ ConsumeAnyToken();
}
if (isOpenMPLoopDirective(DKind))
@@ -3028,12 +3038,28 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
Actions.OpenMP().ActOnFinishedOpenMPDeclareTargetContext(DTCI);
break;
}
+ case OMPD_begin_declare_variant: {
+ ConsumeToken();
+ if (!ParseOpenMPDeclareBeginVariantDirective(Loc)) {
+ // Skip the last annot_pragma_openmp_end.
+ if (!isEofOrEom())
+ ConsumeAnnotationToken();
+ }
+ return Directive;
+ }
+ case OMPD_end_declare_variant: {
+ ConsumeToken();
+ if (Actions.OpenMP().isInOpenMPDeclareVariantScope())
+ Actions.OpenMP().ActOnOpenMPEndDeclareVariant();
+ else
+ Diag(Loc, diag::err_expected_begin_declare_variant);
+ ConsumeAnnotationToken();
+ break;
+ }
case OMPD_declare_simd:
case OMPD_begin_declare_target:
case OMPD_end_declare_target:
case OMPD_requires:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
case OMPD_declare_variant:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind, OMPVersion);
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index b880123..3775956 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -7481,6 +7481,9 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
break;
// HLSL attributes:
+ case ParsedAttr::AT_RootSignature:
+ S.HLSL().handleRootSignatureAttr(D, AL);
+ break;
case ParsedAttr::AT_HLSLNumThreads:
S.HLSL().handleNumThreadsAttr(D, AL);
break;
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 08bd22a..744ec43 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -29,6 +29,7 @@
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Template.h"
@@ -950,6 +951,32 @@ void SemaHLSL::emitLogicalOperatorFixIt(Expr *LHS, Expr *RHS,
<< NewFnName << FixItHint::CreateReplacement(FullRange, OS.str());
}
+void SemaHLSL::handleRootSignatureAttr(Decl *D, const ParsedAttr &AL) {
+ if (AL.getNumArgs() != 1) {
+ Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
+ return;
+ }
+
+ IdentifierInfo *Ident = AL.getArgAsIdent(0)->getIdentifierInfo();
+ if (auto *RS = D->getAttr<RootSignatureAttr>()) {
+ if (RS->getSignature() != Ident) {
+ Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << RS;
+ return;
+ }
+
+ Diag(AL.getLoc(), diag::warn_duplicate_attribute_exact) << RS;
+ return;
+ }
+
+ LookupResult R(SemaRef, Ident, SourceLocation(), Sema::LookupOrdinaryName);
+ if (SemaRef.LookupQualifiedName(R, D->getDeclContext()))
+ if (isa<HLSLRootSignatureDecl>(R.getFoundDecl())) {
+ // Perform validation of constructs here
+ D->addAttr(::new (getASTContext())
+ RootSignatureAttr(getASTContext(), AL, Ident));
+ }
+}
+
void SemaHLSL::handleNumThreadsAttr(Decl *D, const ParsedAttr &AL) {
llvm::VersionTuple SMVersion =
getASTContext().getTargetInfo().getTriple().getOSVersion();
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 15d568f..be6ce97 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -9648,6 +9648,13 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
DSAStackTy &DSA,
SemaOpenMP::VarsWithInheritedDSAType &VarsWithImplicitDSA,
OMPLoopBasedDirective::HelperExprs &Built) {
+ // If either of the loop expressions exist and contain errors, we bail out
+ // early because diagnostics have already been emitted and we can't reliably
+ // check more about the loop.
+ if ((CollapseLoopCountExpr && CollapseLoopCountExpr->containsErrors()) ||
+ (OrderedLoopCountExpr && OrderedLoopCountExpr->containsErrors()))
+ return 0;
+
unsigned NestedLoopCount = 1;
bool SupportsNonPerfectlyNested = (SemaRef.LangOpts.OpenMP >= 50) &&
!isOpenMPLoopTransformationDirective(DKind);
@@ -15937,6 +15944,13 @@ ExprResult SemaOpenMP::VerifyPositiveIntegerConstantInClause(
<< E->getSourceRange();
return ExprError();
}
+
+ if (!Result.isRepresentableByInt64()) {
+ Diag(E->getExprLoc(), diag::err_omp_large_expression_in_clause)
+ << getOpenMPClauseNameForDiag(CKind) << E->getSourceRange();
+ return ExprError();
+ }
+
if (CKind == OMPC_collapse && DSAStack->getAssociatedLoops() == 1)
DSAStack->setAssociatedLoops(Result.getExtValue());
else if (CKind == OMPC_ordered)
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 08b3a42..01065f2 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -999,6 +999,11 @@ Decl *TemplateDeclInstantiator::VisitHLSLBufferDecl(HLSLBufferDecl *Decl) {
llvm_unreachable("HLSL buffer declarations cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitHLSLRootSignatureDecl(
+ HLSLRootSignatureDecl *Decl) {
+ llvm_unreachable("HLSL root signature declarations cannot be instantiated");
+}
+
Decl *
TemplateDeclInstantiator::VisitPragmaCommentDecl(PragmaCommentDecl *D) {
llvm_unreachable("pragma comment cannot be instantiated");
diff --git a/clang/lib/Serialization/ASTCommon.cpp b/clang/lib/Serialization/ASTCommon.cpp
index ad277f1..76eb869 100644
--- a/clang/lib/Serialization/ASTCommon.cpp
+++ b/clang/lib/Serialization/ASTCommon.cpp
@@ -458,6 +458,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::RequiresExprBody:
case Decl::UnresolvedUsingIfExists:
case Decl::HLSLBuffer:
+ case Decl::HLSLRootSignature:
case Decl::OpenACCDeclare:
case Decl::OpenACCRoutine:
return false;
diff --git a/clang/lib/Serialization/ModuleManager.cpp b/clang/lib/Serialization/ModuleManager.cpp
index 7f3f246..fa9533b 100644
--- a/clang/lib/Serialization/ModuleManager.cpp
+++ b/clang/lib/Serialization/ModuleManager.cpp
@@ -124,9 +124,9 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Note: ExpectedSize and ExpectedModTime will be 0 for MK_ImplicitModule
// when using an ASTFileSignature.
if (lookupModuleFile(FileName, ExpectedSize, ExpectedModTime, Entry)) {
- ErrorStr = IgnoreModTime
- ? "module file has a different size than expected"
- : "module file has a different size or mtime than expected";
+ ErrorStr = IgnoreModTime ? "module file has a different size than expected"
+ : "module file has a different size or "
+ "modification time than expected";
return OutOfDate;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index f6b5ad8..9bcaad1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -273,28 +273,29 @@ public:
/// Invalidate the destination buffer determined by characters copied.
static ProgramStateRef
invalidateDestinationBufferBySize(CheckerContext &C, ProgramStateRef S,
- const Expr *BufE, SVal BufV, SVal SizeV,
- QualType SizeTy);
+ const Expr *BufE, ConstCFGElementRef Elem,
+ SVal BufV, SVal SizeV, QualType SizeTy);
/// Operation never overflows, do not invalidate the super region.
static ProgramStateRef invalidateDestinationBufferNeverOverflows(
- CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV);
+ CheckerContext &C, ProgramStateRef S, ConstCFGElementRef Elem, SVal BufV);
/// We do not know whether the operation can overflow (e.g. size is unknown),
/// invalidate the super region and escape related pointers.
static ProgramStateRef invalidateDestinationBufferAlwaysEscapeSuperRegion(
- CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV);
+ CheckerContext &C, ProgramStateRef S, ConstCFGElementRef Elem, SVal BufV);
/// Invalidate the source buffer for escaping pointers.
static ProgramStateRef invalidateSourceBuffer(CheckerContext &C,
ProgramStateRef S,
- const Expr *BufE, SVal BufV);
+ ConstCFGElementRef Elem,
+ SVal BufV);
/// @param InvalidationTraitOperations Determine how to invlidate the
/// MemRegion by setting the invalidation traits. Return true to cause pointer
/// escape, or false otherwise.
static ProgramStateRef invalidateBufferAux(
- CheckerContext &C, ProgramStateRef State, const Expr *Ex, SVal V,
+ CheckerContext &C, ProgramStateRef State, ConstCFGElementRef Elem, SVal V,
llvm::function_ref<bool(RegionAndSymbolInvalidationTraits &,
const MemRegion *)>
InvalidationTraitOperations);
@@ -302,8 +303,8 @@ public:
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
- static bool memsetAux(const Expr *DstBuffer, SVal CharE,
- const Expr *Size, CheckerContext &C,
+ static bool memsetAux(const Expr *DstBuffer, ConstCFGElementRef Elem,
+ SVal CharE, const Expr *Size, CheckerContext &C,
ProgramStateRef &State);
// Re-usable checks
@@ -1211,8 +1212,8 @@ bool CStringChecker::isFirstBufInBound(CheckerContext &C, ProgramStateRef State,
}
ProgramStateRef CStringChecker::invalidateDestinationBufferBySize(
- CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV,
- SVal SizeV, QualType SizeTy) {
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE,
+ ConstCFGElementRef Elem, SVal BufV, SVal SizeV, QualType SizeTy) {
auto InvalidationTraitOperations =
[&C, S, BufTy = BufE->getType(), BufV, SizeV,
SizeTy](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
@@ -1227,22 +1228,22 @@ ProgramStateRef CStringChecker::invalidateDestinationBufferBySize(
return false;
};
- return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+ return invalidateBufferAux(C, S, Elem, BufV, InvalidationTraitOperations);
}
ProgramStateRef
CStringChecker::invalidateDestinationBufferAlwaysEscapeSuperRegion(
- CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV) {
+ CheckerContext &C, ProgramStateRef S, ConstCFGElementRef Elem, SVal BufV) {
auto InvalidationTraitOperations = [](RegionAndSymbolInvalidationTraits &,
const MemRegion *R) {
return isa<FieldRegion>(R);
};
- return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+ return invalidateBufferAux(C, S, Elem, BufV, InvalidationTraitOperations);
}
ProgramStateRef CStringChecker::invalidateDestinationBufferNeverOverflows(
- CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV) {
+ CheckerContext &C, ProgramStateRef S, ConstCFGElementRef Elem, SVal BufV) {
auto InvalidationTraitOperations =
[](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
if (MemRegion::FieldRegionKind == R->getKind())
@@ -1252,12 +1253,12 @@ ProgramStateRef CStringChecker::invalidateDestinationBufferNeverOverflows(
return false;
};
- return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+ return invalidateBufferAux(C, S, Elem, BufV, InvalidationTraitOperations);
}
ProgramStateRef CStringChecker::invalidateSourceBuffer(CheckerContext &C,
ProgramStateRef S,
- const Expr *BufE,
+ ConstCFGElementRef Elem,
SVal BufV) {
auto InvalidationTraitOperations =
[](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
@@ -1269,11 +1270,11 @@ ProgramStateRef CStringChecker::invalidateSourceBuffer(CheckerContext &C,
return true;
};
- return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+ return invalidateBufferAux(C, S, Elem, BufV, InvalidationTraitOperations);
}
ProgramStateRef CStringChecker::invalidateBufferAux(
- CheckerContext &C, ProgramStateRef State, const Expr *E, SVal V,
+ CheckerContext &C, ProgramStateRef State, ConstCFGElementRef Elem, SVal V,
llvm::function_ref<bool(RegionAndSymbolInvalidationTraits &,
const MemRegion *)>
InvalidationTraitOperations) {
@@ -1299,7 +1300,7 @@ ProgramStateRef CStringChecker::invalidateBufferAux(
RegionAndSymbolInvalidationTraits ITraits;
bool CausesPointerEscape = InvalidationTraitOperations(ITraits, R);
- return State->invalidateRegions(R, E, C.blockCount(), LCtx,
+ return State->invalidateRegions(R, Elem, C.blockCount(), LCtx,
CausesPointerEscape, nullptr, nullptr,
&ITraits);
}
@@ -1349,9 +1350,9 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
}
}
-bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
- const Expr *Size, CheckerContext &C,
- ProgramStateRef &State) {
+bool CStringChecker::memsetAux(const Expr *DstBuffer, ConstCFGElementRef Elem,
+ SVal CharVal, const Expr *Size,
+ CheckerContext &C, ProgramStateRef &State) {
SVal MemVal = C.getSVal(DstBuffer);
SVal SizeVal = C.getSVal(Size);
const MemRegion *MR = MemVal.getAsRegion();
@@ -1404,8 +1405,8 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
} else {
// If the destination buffer's extent is not equal to the value of
// third argument, just invalidate buffer.
- State = invalidateDestinationBufferBySize(C, State, DstBuffer, MemVal,
- SizeVal, Size->getType());
+ State = invalidateDestinationBufferBySize(
+ C, State, DstBuffer, Elem, MemVal, SizeVal, Size->getType());
}
if (StateNullChar && !StateNonNullChar) {
@@ -1430,7 +1431,7 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
} else {
// If the offset is not zero and char value is not concrete, we can do
// nothing but invalidate the buffer.
- State = invalidateDestinationBufferBySize(C, State, DstBuffer, MemVal,
+ State = invalidateDestinationBufferBySize(C, State, DstBuffer, Elem, MemVal,
SizeVal, Size->getType());
}
return true;
@@ -1531,12 +1532,12 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallEvent &Call,
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
state = invalidateDestinationBufferBySize(
- C, state, Dest.Expression, C.getSVal(Dest.Expression), sizeVal,
- Size.Expression->getType());
+ C, state, Dest.Expression, Call.getCFGElementRef(),
+ C.getSVal(Dest.Expression), sizeVal, Size.Expression->getType());
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = invalidateSourceBuffer(C, state, Source.Expression,
+ state = invalidateSourceBuffer(C, state, Call.getCFGElementRef(),
C.getSVal(Source.Expression));
C.addTransition(state);
@@ -2231,13 +2232,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallEvent &Call,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
- state = invalidateDestinationBufferBySize(C, state, Dst.Expression,
- *dstRegVal, amountCopied,
- C.getASTContext().getSizeType());
+ state = invalidateDestinationBufferBySize(
+ C, state, Dst.Expression, Call.getCFGElementRef(), *dstRegVal,
+ amountCopied, C.getASTContext().getSizeType());
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = invalidateSourceBuffer(C, state, srcExpr.Expression, srcVal);
+ state = invalidateSourceBuffer(C, state, Call.getCFGElementRef(), srcVal);
// Set the C string length of the destination, if we know it.
if (IsBounded && (appendK == ConcatFnKind::none)) {
@@ -2457,7 +2458,7 @@ void CStringChecker::evalStrsep(CheckerContext &C,
// character to NUL.
// As the replacement never overflows, do not invalidate its super region.
State = invalidateDestinationBufferNeverOverflows(
- C, State, SearchStrPtr.Expression, Result);
+ C, State, Call.getCFGElementRef(), Result);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
@@ -2505,8 +2506,8 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
SVal DstVal = State->getSVal(Dst, LCtx);
// FIXME: As we do not know how many items are copied, we also invalidate the
// super region containing the target location.
- State =
- invalidateDestinationBufferAlwaysEscapeSuperRegion(C, State, Dst, DstVal);
+ State = invalidateDestinationBufferAlwaysEscapeSuperRegion(
+ C, State, Call.getCFGElementRef(), DstVal);
SValBuilder &SVB = C.getSValBuilder();
@@ -2559,8 +2560,8 @@ void CStringChecker::evalMemset(CheckerContext &C,
// According to the values of the arguments, bind the value of the second
// argument to the destination buffer and set string length, or just
// invalidate the destination buffer.
- if (!memsetAux(Buffer.Expression, C.getSVal(CharE.Expression),
- Size.Expression, C, State))
+ if (!memsetAux(Buffer.Expression, Call.getCFGElementRef(),
+ C.getSVal(CharE.Expression), Size.Expression, C, State))
return;
State = State->BindExpr(Call.getOriginExpr(), LCtx, BufferPtrVal);
@@ -2604,7 +2605,8 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallEvent &Call) const {
if (!State)
return;
- if (!memsetAux(Buffer.Expression, Zero, Size.Expression, C, State))
+ if (!memsetAux(Buffer.Expression, Call.getCFGElementRef(), Zero,
+ Size.Expression, C, State))
return;
C.addTransition(State);
diff --git a/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
index d850344..3cc49e4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -33,11 +33,11 @@ namespace {
class ContainerModeling
: public Checker<check::PostCall, check::LiveSymbols, check::DeadSymbols> {
- void handleBegin(CheckerContext &C, const Expr *CE, SVal RetVal,
+ void handleBegin(CheckerContext &C, ConstCFGElementRef Elem, SVal RetVal,
SVal Cont) const;
- void handleEnd(CheckerContext &C, const Expr *CE, SVal RetVal,
+ void handleEnd(CheckerContext &C, ConstCFGElementRef Elem, SVal RetVal,
SVal Cont) const;
- void handleAssignment(CheckerContext &C, SVal Cont, const Expr *CE = nullptr,
+ void handleAssignment(CheckerContext &C, SVal Cont, ConstCFGElementRef Elem,
SVal OldCont = UndefinedVal()) const;
void handleAssign(CheckerContext &C, SVal Cont, const Expr *ContE) const;
void handleClear(CheckerContext &C, SVal Cont, const Expr *ContE) const;
@@ -108,11 +108,12 @@ bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont, const Expr *E,
- QualType T, const LocationContext *LCtx,
+ const MemRegion *Cont,
+ ConstCFGElementRef Elem, QualType T,
+ const LocationContext *LCtx,
unsigned BlockCount);
ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const Expr *E, QualType T,
+ ConstCFGElementRef Elem, QualType T,
const LocationContext *LCtx,
unsigned BlockCount);
ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
@@ -163,12 +164,12 @@ void ContainerModeling::checkPostCall(const CallEvent &Call,
return;
if (cast<CXXMethodDecl>(Func)->isMoveAssignmentOperator()) {
- handleAssignment(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
- Call.getArgSVal(0));
+ handleAssignment(C, InstCall->getCXXThisVal(), Call.getCFGElementRef(),
+ Call.getArgSVal(0));
return;
}
- handleAssignment(C, InstCall->getCXXThisVal());
+ handleAssignment(C, InstCall->getCXXThisVal(), C.getCFGElementRef());
return;
}
} else {
@@ -198,13 +199,13 @@ void ContainerModeling::checkPostCall(const CallEvent &Call,
return;
if (isBeginCall(Func)) {
- handleBegin(C, OrigExpr, Call.getReturnValue(),
+ handleBegin(C, Call.getCFGElementRef(), Call.getReturnValue(),
InstCall->getCXXThisVal());
return;
}
if (isEndCall(Func)) {
- handleEnd(C, OrigExpr, Call.getReturnValue(),
+ handleEnd(C, Call.getCFGElementRef(), Call.getReturnValue(),
InstCall->getCXXThisVal());
return;
}
@@ -250,8 +251,8 @@ void ContainerModeling::checkDeadSymbols(SymbolReaper &SR,
C.addTransition(State);
}
-void ContainerModeling::handleBegin(CheckerContext &C, const Expr *CE,
- SVal RetVal, SVal Cont) const {
+void ContainerModeling::handleBegin(CheckerContext &C, ConstCFGElementRef Elem,
+ SVal RetVal, SVal Cont) const {
const auto *ContReg = Cont.getAsRegion();
if (!ContReg)
return;
@@ -263,7 +264,7 @@ void ContainerModeling::handleBegin(CheckerContext &C, const Expr *CE,
auto State = C.getState();
auto BeginSym = getContainerBegin(State, ContReg);
if (!BeginSym) {
- State = createContainerBegin(State, ContReg, CE, C.getASTContext().LongTy,
+ State = createContainerBegin(State, ContReg, Elem, C.getASTContext().LongTy,
C.getLocationContext(), C.blockCount());
BeginSym = getContainerBegin(State, ContReg);
}
@@ -272,8 +273,8 @@ void ContainerModeling::handleBegin(CheckerContext &C, const Expr *CE,
C.addTransition(State);
}
-void ContainerModeling::handleEnd(CheckerContext &C, const Expr *CE,
- SVal RetVal, SVal Cont) const {
+void ContainerModeling::handleEnd(CheckerContext &C, ConstCFGElementRef Elem,
+ SVal RetVal, SVal Cont) const {
const auto *ContReg = Cont.getAsRegion();
if (!ContReg)
return;
@@ -285,7 +286,7 @@ void ContainerModeling::handleEnd(CheckerContext &C, const Expr *CE,
auto State = C.getState();
auto EndSym = getContainerEnd(State, ContReg);
if (!EndSym) {
- State = createContainerEnd(State, ContReg, CE, C.getASTContext().LongTy,
+ State = createContainerEnd(State, ContReg, Elem, C.getASTContext().LongTy,
C.getLocationContext(), C.blockCount());
EndSym = getContainerEnd(State, ContReg);
}
@@ -295,7 +296,8 @@ void ContainerModeling::handleEnd(CheckerContext &C, const Expr *CE,
}
void ContainerModeling::handleAssignment(CheckerContext &C, SVal Cont,
- const Expr *CE, SVal OldCont) const {
+ ConstCFGElementRef Elem,
+ SVal OldCont) const {
const auto *ContReg = Cont.getAsRegion();
if (!ContReg)
return;
@@ -329,7 +331,7 @@ void ContainerModeling::handleAssignment(CheckerContext &C, SVal Cont,
auto &SVB = C.getSValBuilder();
// Then generate and assign a new "end" symbol for the new container.
auto NewEndSym =
- SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ SymMgr.conjureSymbol(Elem, C.getLocationContext(),
C.getASTContext().LongTy, C.blockCount());
State = assumeNoOverflow(State, NewEndSym, 4);
if (CData) {
@@ -848,8 +850,9 @@ SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
}
ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont, const Expr *E,
- QualType T, const LocationContext *LCtx,
+ const MemRegion *Cont,
+ ConstCFGElementRef Elem, QualType T,
+ const LocationContext *LCtx,
unsigned BlockCount) {
// Only create if it does not exist
const auto *CDataPtr = getContainerData(State, Cont);
@@ -857,8 +860,8 @@ ProgramStateRef createContainerBegin(ProgramStateRef State,
return State;
auto &SymMgr = State->getSymbolManager();
- const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
- "begin");
+ const SymbolConjured *Sym =
+ SymMgr.conjureSymbol(Elem, LCtx, T, BlockCount, "begin");
State = assumeNoOverflow(State, Sym, 4);
if (CDataPtr) {
@@ -871,7 +874,7 @@ ProgramStateRef createContainerBegin(ProgramStateRef State,
}
ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const Expr *E, QualType T,
+ ConstCFGElementRef Elem, QualType T,
const LocationContext *LCtx,
unsigned BlockCount) {
// Only create if it does not exist
@@ -880,8 +883,8 @@ ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
return State;
auto &SymMgr = State->getSymbolManager();
- const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
- "end");
+ const SymbolConjured *Sym =
+ SymMgr.conjureSymbol(Elem, LCtx, T, BlockCount, "end");
State = assumeNoOverflow(State, Sym, 4);
if (CDataPtr) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
index 6ffc05f..abfc5d2 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
@@ -124,7 +124,7 @@ void ErrnoModeling::checkBeginFunction(CheckerContext &C) const {
// of the data member `ErrnoDecl` of the singleton `ErrnoModeling` checker
// object.
const SymbolConjured *Sym = SVB.conjureSymbol(
- nullptr, C.getLocationContext(),
+ C.getCFGElementRef(), C.getLocationContext(),
ACtx.getLValueReferenceType(ACtx.IntTy), C.blockCount(), &ErrnoDecl);
// The symbolic region is untyped, create a typed sub-region in it.
@@ -256,11 +256,11 @@ ProgramStateRef setErrnoForStdFailure(ProgramStateRef State, CheckerContext &C,
ProgramStateRef setErrnoStdMustBeChecked(ProgramStateRef State,
CheckerContext &C,
- const Expr *InvalE) {
+ ConstCFGElementRef Elem) {
const MemRegion *ErrnoR = State->get<ErrnoRegion>();
if (!ErrnoR)
return State;
- State = State->invalidateRegions(ErrnoR, InvalE, C.blockCount(),
+ State = State->invalidateRegions(ErrnoR, Elem, C.blockCount(),
C.getLocationContext(), false);
if (!State)
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h b/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
index 95da8a2..e414353 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
+++ b/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
@@ -96,9 +96,10 @@ ProgramStateRef setErrnoForStdFailure(ProgramStateRef State, CheckerContext &C,
/// Set errno state for the common case when a standard function indicates
/// failure only by \c errno. Sets \c ErrnoCheckState to \c MustBeChecked, and
/// invalidates the errno region (clear of previous value).
-/// \arg \c InvalE Expression that causes invalidation of \c errno.
+/// \arg \c Elem CFG Element that causes invalidation of \c errno.
ProgramStateRef setErrnoStdMustBeChecked(ProgramStateRef State,
- CheckerContext &C, const Expr *InvalE);
+ CheckerContext &C,
+ ConstCFGElementRef Elem);
} // namespace errno_modeling
} // namespace ento
diff --git a/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index ba561dd..e9825b7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -207,14 +207,15 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State, SVal Val,
}
ProgramStateRef createIteratorPosition(ProgramStateRef State, SVal Val,
- const MemRegion *Cont, const Stmt *S,
+ const MemRegion *Cont,
+ ConstCFGElementRef Elem,
const LocationContext *LCtx,
unsigned blockCount) {
auto &StateMgr = State->getStateManager();
auto &SymMgr = StateMgr.getSymbolManager();
auto &ACtx = StateMgr.getContext();
- auto Sym = SymMgr.conjureSymbol(S, LCtx, ACtx.LongTy, blockCount);
+ auto *Sym = SymMgr.conjureSymbol(Elem, LCtx, ACtx.LongTy, blockCount);
State = assumeNoOverflow(State, Sym, 4);
return setIteratorPosition(State, Val,
IteratorPosition::getPosition(Cont, Sym));
diff --git a/clang/lib/StaticAnalyzer/Checkers/Iterator.h b/clang/lib/StaticAnalyzer/Checkers/Iterator.h
index 46de8ea..0a26db0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Iterator.h
+++ b/clang/lib/StaticAnalyzer/Checkers/Iterator.h
@@ -165,7 +165,8 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State, SVal Val);
ProgramStateRef setIteratorPosition(ProgramStateRef State, SVal Val,
const IteratorPosition &Pos);
ProgramStateRef createIteratorPosition(ProgramStateRef State, SVal Val,
- const MemRegion *Cont, const Stmt *S,
+ const MemRegion *Cont,
+ ConstCFGElementRef Elem,
const LocationContext *LCtx,
unsigned blockCount);
ProgramStateRef advancePosition(ProgramStateRef State, SVal Iter,
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index d4ce73b..6139585 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -90,8 +90,9 @@ class IteratorModeling
check::PostStmt<MaterializeTemporaryExpr>,
check::Bind, check::LiveSymbols, check::DeadSymbols> {
- using AdvanceFn = void (IteratorModeling::*)(CheckerContext &, const Expr *,
- SVal, SVal, SVal) const;
+ using AdvanceFn = void (IteratorModeling::*)(CheckerContext &,
+ ConstCFGElementRef, SVal, SVal,
+ SVal) const;
void handleOverloadedOperator(CheckerContext &C, const CallEvent &Call,
OverloadedOperatorKind Op) const;
@@ -99,8 +100,9 @@ class IteratorModeling
const Expr *OrigExpr,
const AdvanceFn *Handler) const;
- void handleComparison(CheckerContext &C, const Expr *CE, SVal RetVal,
- SVal LVal, SVal RVal, OverloadedOperatorKind Op) const;
+ void handleComparison(CheckerContext &C, const Expr *CE,
+ ConstCFGElementRef Elem, SVal RetVal, SVal LVal,
+ SVal RVal, OverloadedOperatorKind Op) const;
void processComparison(CheckerContext &C, ProgramStateRef State,
SymbolRef Sym1, SymbolRef Sym2, SVal RetVal,
OverloadedOperatorKind Op) const;
@@ -108,19 +110,20 @@ class IteratorModeling
bool Postfix) const;
void handleDecrement(CheckerContext &C, SVal RetVal, SVal Iter,
bool Postfix) const;
- void handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
+ void handleRandomIncrOrDecr(CheckerContext &C, ConstCFGElementRef Elem,
OverloadedOperatorKind Op, SVal RetVal,
SVal Iterator, SVal Amount) const;
void handlePtrIncrOrDecr(CheckerContext &C, const Expr *Iterator,
- OverloadedOperatorKind OK, SVal Offset) const;
- void handleAdvance(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
- SVal Amount) const;
- void handlePrev(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
- SVal Amount) const;
- void handleNext(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
- SVal Amount) const;
- void assignToContainer(CheckerContext &C, const Expr *CE, SVal RetVal,
- const MemRegion *Cont) const;
+ ConstCFGElementRef Elem, OverloadedOperatorKind OK,
+ SVal Offset) const;
+ void handleAdvance(CheckerContext &C, ConstCFGElementRef Elem, SVal RetVal,
+ SVal Iter, SVal Amount) const;
+ void handlePrev(CheckerContext &C, ConstCFGElementRef Elem, SVal RetVal,
+ SVal Iter, SVal Amount) const;
+ void handleNext(CheckerContext &C, ConstCFGElementRef Elem, SVal RetVal,
+ SVal Iter, SVal Amount) const;
+ void assignToContainer(CheckerContext &C, ConstCFGElementRef Elem,
+ SVal RetVal, const MemRegion *Cont) const;
bool noChangeInAdvance(CheckerContext &C, SVal Iter, const Expr *CE) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
@@ -224,7 +227,7 @@ void IteratorModeling::checkPostCall(const CallEvent &Call,
C.getASTContext()).getTypePtr() ==
Call.getResultType().getDesugaredType(C.getASTContext()).getTypePtr()) {
if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(i))) {
- assignToContainer(C, OrigExpr, Call.getReturnValue(),
+ assignToContainer(C, Call.getCFGElementRef(), Call.getReturnValue(),
Pos->getContainer());
return;
}
@@ -255,7 +258,7 @@ void IteratorModeling::checkPostStmt(const UnaryOperator *UO,
return;
auto &SVB = C.getSValBuilder();
- handlePtrIncrOrDecr(C, UO->getSubExpr(),
+ handlePtrIncrOrDecr(C, UO->getSubExpr(), C.getCFGElementRef(),
isIncrementOperator(OK) ? OO_Plus : OO_Minus,
SVB.makeArrayIndex(1));
}
@@ -271,7 +274,7 @@ void IteratorModeling::checkPostStmt(const BinaryOperator *BO,
if (isSimpleComparisonOperator(BO->getOpcode())) {
SVal Result = State->getSVal(BO, C.getLocationContext());
- handleComparison(C, BO, Result, LVal, RVal,
+ handleComparison(C, BO, C.getCFGElementRef(), Result, LVal, RVal,
BinaryOperator::getOverloadedOperator(OK));
} else if (isRandomIncrOrDecrOperator(OK)) {
// In case of operator+ the iterator can be either on the LHS (eg.: it + 1),
@@ -284,8 +287,8 @@ void IteratorModeling::checkPostStmt(const BinaryOperator *BO,
if (!AmountExpr->getType()->isIntegralOrEnumerationType())
return;
SVal AmountVal = IsIterOnLHS ? RVal : LVal;
- handlePtrIncrOrDecr(C, IterExpr, BinaryOperator::getOverloadedOperator(OK),
- AmountVal);
+ handlePtrIncrOrDecr(C, IterExpr, C.getCFGElementRef(),
+ BinaryOperator::getOverloadedOperator(OK), AmountVal);
}
}
@@ -351,27 +354,29 @@ IteratorModeling::handleOverloadedOperator(CheckerContext &C,
OverloadedOperatorKind Op) const {
if (isSimpleComparisonOperator(Op)) {
const auto *OrigExpr = Call.getOriginExpr();
+ const auto Elem = Call.getCFGElementRef();
if (!OrigExpr)
return;
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleComparison(C, OrigExpr, Call.getReturnValue(),
+ handleComparison(C, OrigExpr, Elem, Call.getReturnValue(),
InstCall->getCXXThisVal(), Call.getArgSVal(0), Op);
return;
}
- handleComparison(C, OrigExpr, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1), Op);
+ handleComparison(C, OrigExpr, Elem, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1), Op);
return;
} else if (isRandomIncrOrDecrOperator(Op)) {
const auto *OrigExpr = Call.getOriginExpr();
+ const auto Elem = Call.getCFGElementRef();
if (!OrigExpr)
return;
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
if (Call.getNumArgs() >= 1 &&
Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
- handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
+ handleRandomIncrOrDecr(C, Elem, Op, Call.getReturnValue(),
InstCall->getCXXThisVal(), Call.getArgSVal(0));
return;
}
@@ -391,8 +396,8 @@ IteratorModeling::handleOverloadedOperator(CheckerContext &C,
SVal Iterator = IsIterFirst ? FirstArg : SecondArg;
SVal Amount = IsIterFirst ? SecondArg : FirstArg;
- handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
- Iterator, Amount);
+ handleRandomIncrOrDecr(C, Elem, Op, Call.getReturnValue(), Iterator,
+ Amount);
return;
}
}
@@ -425,7 +430,7 @@ IteratorModeling::handleAdvanceLikeFunction(CheckerContext &C,
const Expr *OrigExpr,
const AdvanceFn *Handler) const {
if (!C.wasInlined) {
- (this->**Handler)(C, OrigExpr, Call.getReturnValue(),
+ (this->**Handler)(C, Call.getCFGElementRef(), Call.getReturnValue(),
Call.getArgSVal(0), Call.getArgSVal(1));
return;
}
@@ -436,7 +441,7 @@ IteratorModeling::handleAdvanceLikeFunction(CheckerContext &C,
if (IdInfo) {
if (IdInfo->getName() == "advance") {
if (noChangeInAdvance(C, Call.getArgSVal(0), OrigExpr)) {
- (this->**Handler)(C, OrigExpr, Call.getReturnValue(),
+ (this->**Handler)(C, Call.getCFGElementRef(), Call.getReturnValue(),
Call.getArgSVal(0), Call.getArgSVal(1));
}
}
@@ -444,7 +449,8 @@ IteratorModeling::handleAdvanceLikeFunction(CheckerContext &C,
}
void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
- SVal RetVal, SVal LVal, SVal RVal,
+ ConstCFGElementRef Elem, SVal RetVal,
+ SVal LVal, SVal RVal,
OverloadedOperatorKind Op) const {
// Record the operands and the operator of the comparison for the next
// evalAssume, if the result is a symbolic expression. If it is a concrete
@@ -467,7 +473,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
SymbolRef Sym;
if (!LPos || !RPos) {
auto &SymMgr = C.getSymbolManager();
- Sym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ Sym = SymMgr.conjureSymbol(Elem, C.getLocationContext(),
C.getASTContext().LongTy, C.blockCount());
State = assumeNoOverflow(State, Sym, 4);
}
@@ -494,7 +500,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
auto &SymMgr = C.getSymbolManager();
auto *LCtx = C.getLocationContext();
RetVal = nonloc::SymbolVal(SymMgr.conjureSymbol(
- CE, LCtx, C.getASTContext().BoolTy, C.blockCount()));
+ Elem, LCtx, C.getASTContext().BoolTy, C.blockCount()));
State = State->BindExpr(CE, LCtx, RetVal);
}
@@ -583,7 +589,8 @@ void IteratorModeling::handleDecrement(CheckerContext &C, SVal RetVal,
C.addTransition(State);
}
-void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
+void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C,
+ ConstCFGElementRef Elem,
OverloadedOperatorKind Op,
SVal RetVal, SVal Iterator,
SVal Amount) const {
@@ -617,12 +624,13 @@ void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
State = setIteratorPosition(State, TgtVal, *NewPos);
C.addTransition(State);
} else {
- assignToContainer(C, CE, TgtVal, Pos->getContainer());
+ assignToContainer(C, Elem, TgtVal, Pos->getContainer());
}
}
void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
const Expr *Iterator,
+ ConstCFGElementRef Elem,
OverloadedOperatorKind OK,
SVal Offset) const {
if (!isa<DefinedSVal>(Offset))
@@ -661,34 +669,35 @@ void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
ProgramStateRef NewState = setIteratorPosition(State, NewVal, *NewPos);
C.addTransition(NewState);
} else {
- assignToContainer(C, Iterator, NewVal, OldPos->getContainer());
+ assignToContainer(C, Elem, NewVal, OldPos->getContainer());
}
}
-void IteratorModeling::handleAdvance(CheckerContext &C, const Expr *CE,
+void IteratorModeling::handleAdvance(CheckerContext &C, ConstCFGElementRef Elem,
SVal RetVal, SVal Iter,
SVal Amount) const {
- handleRandomIncrOrDecr(C, CE, OO_PlusEqual, RetVal, Iter, Amount);
+ handleRandomIncrOrDecr(C, Elem, OO_PlusEqual, RetVal, Iter, Amount);
}
-void IteratorModeling::handlePrev(CheckerContext &C, const Expr *CE,
+void IteratorModeling::handlePrev(CheckerContext &C, ConstCFGElementRef Elem,
SVal RetVal, SVal Iter, SVal Amount) const {
- handleRandomIncrOrDecr(C, CE, OO_Minus, RetVal, Iter, Amount);
+ handleRandomIncrOrDecr(C, Elem, OO_Minus, RetVal, Iter, Amount);
}
-void IteratorModeling::handleNext(CheckerContext &C, const Expr *CE,
+void IteratorModeling::handleNext(CheckerContext &C, ConstCFGElementRef Elem,
SVal RetVal, SVal Iter, SVal Amount) const {
- handleRandomIncrOrDecr(C, CE, OO_Plus, RetVal, Iter, Amount);
+ handleRandomIncrOrDecr(C, Elem, OO_Plus, RetVal, Iter, Amount);
}
-void IteratorModeling::assignToContainer(CheckerContext &C, const Expr *CE,
- SVal RetVal,
+void IteratorModeling::assignToContainer(CheckerContext &C,
+ ConstCFGElementRef Elem, SVal RetVal,
const MemRegion *Cont) const {
Cont = Cont->getMostDerivedObjectRegion();
auto State = C.getState();
const auto *LCtx = C.getLocationContext();
- State = createIteratorPosition(State, RetVal, Cont, CE, LCtx, C.blockCount());
+ State =
+ createIteratorPosition(State, RetVal, Cont, Elem, LCtx, C.blockCount());
C.addTransition(State);
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 1c4293c..e970a89 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -1833,8 +1833,10 @@ ProgramStateRef MallocChecker::MallocBindRetVal(CheckerContext &C,
unsigned Count = C.blockCount();
SValBuilder &SVB = C.getSValBuilder();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
- DefinedSVal RetVal = isAlloca ? SVB.getAllocaRegionVal(CE, LCtx, Count)
- : SVB.getConjuredHeapSymbolVal(CE, LCtx, Count);
+ DefinedSVal RetVal =
+ isAlloca ? SVB.getAllocaRegionVal(CE, LCtx, Count)
+ : SVB.getConjuredHeapSymbolVal(Call.getCFGElementRef(), LCtx,
+ CE->getType(), Count);
return State->BindExpr(CE, C.getLocationContext(), RetVal);
}
@@ -2304,7 +2306,7 @@ MallocChecker::FreeMemAux(CheckerContext &C, const Expr *ArgExpr,
// Assume that after memory is freed, it contains unknown values. This
// conforts languages standards, since reading from freed memory is considered
// UB and may result in arbitrary value.
- State = State->invalidateRegions({location}, Call.getOriginExpr(),
+ State = State->invalidateRegions({location}, Call.getCFGElementRef(),
C.blockCount(), C.getLocationContext(),
/*CausesPointerEscape=*/false,
/*InvalidatedSymbols=*/nullptr);
diff --git a/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
index e037719..8e19963 100644
--- a/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -25,12 +25,12 @@ using namespace iterator;
namespace {
class STLAlgorithmModeling : public Checker<eval::Call> {
- bool evalFind(CheckerContext &C, const CallExpr *CE) const;
+ bool evalFind(CheckerContext &C, const CallEvent &Call) const;
- void Find(CheckerContext &C, const CallExpr *CE, unsigned paramNum) const;
+ void Find(CheckerContext &C, const CallEvent &Call, unsigned paramNum) const;
using FnCheck = bool (STLAlgorithmModeling::*)(CheckerContext &,
- const CallExpr *) const;
+ const CallEvent &Call) const;
const CallDescriptionMap<FnCheck> Callbacks = {
{{CDM::SimpleFunc, {"std", "find"}, 3}, &STLAlgorithmModeling::evalFind},
@@ -97,11 +97,12 @@ bool STLAlgorithmModeling::evalCall(const CallEvent &Call,
if (!Handler)
return false;
- return (this->**Handler)(C, CE);
+ return (this->**Handler)(C, Call);
}
bool STLAlgorithmModeling::evalFind(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
+ const auto *CE = dyn_cast<CallExpr>(Call.getOriginExpr());
// std::find()-like functions either take their primary range in the first
// two parameters, or if the first parameter is "execution policy" then in
// the second and third. This means that the second parameter must always be
@@ -112,27 +113,29 @@ bool STLAlgorithmModeling::evalFind(CheckerContext &C,
// If no "execution policy" parameter is used then the first argument is the
// beginning of the range.
if (isIteratorType(CE->getArg(0)->getType())) {
- Find(C, CE, 0);
+ Find(C, Call, 0);
return true;
}
// If "execution policy" parameter is used then the second argument is the
// beginning of the range.
if (isIteratorType(CE->getArg(2)->getType())) {
- Find(C, CE, 1);
+ Find(C, Call, 1);
return true;
}
return false;
}
-void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
+void STLAlgorithmModeling::Find(CheckerContext &C, const CallEvent &Call,
unsigned paramNum) const {
+ const auto *CE = dyn_cast<CallExpr>(Call.getOriginExpr());
+ const auto &Elem = Call.getCFGElementRef();
auto State = C.getState();
auto &SVB = C.getSValBuilder();
const auto *LCtx = C.getLocationContext();
- SVal RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ SVal RetVal = SVB.conjureSymbolVal(nullptr, Elem, LCtx, C.blockCount());
SVal Param = State->getSVal(CE->getArg(paramNum), LCtx);
auto StateFound = State->BindExpr(CE, LCtx, RetVal);
@@ -144,7 +147,7 @@ void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
const auto *Pos = getIteratorPosition(State, Param);
if (Pos) {
StateFound = createIteratorPosition(StateFound, RetVal, Pos->getContainer(),
- CE, LCtx, C.blockCount());
+ Elem, LCtx, C.blockCount());
const auto *NewPos = getIteratorPosition(StateFound, RetVal);
assert(NewPos && "Failed to create new iterator position.");
@@ -166,7 +169,7 @@ void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
Pos = getIteratorPosition(State, Param);
if (Pos) {
StateFound = createIteratorPosition(StateFound, RetVal, Pos->getContainer(),
- CE, LCtx, C.blockCount());
+ Elem, LCtx, C.blockCount());
const auto *NewPos = getIteratorPosition(StateFound, RetVal);
assert(NewPos && "Failed to create new iterator position.");
@@ -199,4 +202,3 @@ void ento::registerSTLAlgorithmModeling(CheckerManager &Mgr) {
bool ento::shouldRegisterSTLAlgorithmModeling(const CheckerManager &mgr) {
return true;
}
-
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index d56e683..66b5958f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -78,10 +78,9 @@ private:
bool handleOstreamOperator(const CallEvent &Call, CheckerContext &C) const;
bool handleSwap(ProgramStateRef State, SVal First, SVal Second,
CheckerContext &C) const;
- std::pair<SVal, ProgramStateRef>
- retrieveOrConjureInnerPtrVal(ProgramStateRef State,
- const MemRegion *ThisRegion, const Expr *E,
- QualType Type, CheckerContext &C) const;
+ std::pair<SVal, ProgramStateRef> retrieveOrConjureInnerPtrVal(
+ ProgramStateRef State, const MemRegion *ThisRegion,
+ ConstCFGElementRef Elem, QualType Type, CheckerContext &C) const;
using SmartPtrMethodHandlerFn =
void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
@@ -306,7 +305,7 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
return false;
const auto PtrVal = C.getSValBuilder().getConjuredHeapSymbolVal(
- Call.getOriginExpr(), C.getLocationContext(),
+ Call.getCFGElementRef(), C.getLocationContext(),
getPointerTypeFromTemplateArg(Call, C), C.blockCount());
const MemRegion *ThisRegion = ThisRegionOpt->getAsRegion();
@@ -437,12 +436,12 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
}
std::pair<SVal, ProgramStateRef> SmartPtrModeling::retrieveOrConjureInnerPtrVal(
- ProgramStateRef State, const MemRegion *ThisRegion, const Expr *E,
+ ProgramStateRef State, const MemRegion *ThisRegion, ConstCFGElementRef Elem,
QualType Type, CheckerContext &C) const {
const auto *Ptr = State->get<TrackedRegionMap>(ThisRegion);
if (Ptr)
return {*Ptr, State};
- auto Val = C.getSValBuilder().conjureSymbolVal(E, C.getLocationContext(),
+ auto Val = C.getSValBuilder().conjureSymbolVal(Elem, C.getLocationContext(),
Type, C.blockCount());
State = State->set<TrackedRegionMap>(ThisRegion, Val);
return {Val, State};
@@ -469,6 +468,7 @@ bool SmartPtrModeling::handleComparisionOp(const CallEvent &Call,
// https://en.cppreference.com/w/cpp/memory/unique_ptr/operator_cmp.
auto makeSValFor = [&C, this](ProgramStateRef State, const Expr *E,
+ ConstCFGElementRef Elem,
SVal S) -> std::pair<SVal, ProgramStateRef> {
if (S.isZeroConstant()) {
return {S, State};
@@ -477,7 +477,7 @@ bool SmartPtrModeling::handleComparisionOp(const CallEvent &Call,
assert(Reg &&
"this pointer of std::unique_ptr should be obtainable as MemRegion");
QualType Type = getInnerPointerType(C, E->getType()->getAsCXXRecordDecl());
- return retrieveOrConjureInnerPtrVal(State, Reg, E, Type, C);
+ return retrieveOrConjureInnerPtrVal(State, Reg, Elem, Type, C);
};
SVal First = Call.getArgSVal(0);
@@ -491,8 +491,10 @@ bool SmartPtrModeling::handleComparisionOp(const CallEvent &Call,
ProgramStateRef State = C.getState();
SVal FirstPtrVal, SecondPtrVal;
- std::tie(FirstPtrVal, State) = makeSValFor(State, FirstExpr, First);
- std::tie(SecondPtrVal, State) = makeSValFor(State, SecondExpr, Second);
+ std::tie(FirstPtrVal, State) =
+ makeSValFor(State, FirstExpr, Call.getCFGElementRef(), First);
+ std::tie(SecondPtrVal, State) =
+ makeSValFor(State, SecondExpr, Call.getCFGElementRef(), Second);
BinaryOperatorKind BOK =
operationKindFromOverloadedOperator(OOK, true).GetBinaryOpUnsafe();
auto RetVal = Bldr.evalBinOp(State, BOK, FirstPtrVal, SecondPtrVal,
@@ -530,7 +532,7 @@ bool SmartPtrModeling::handleOstreamOperator(const CallEvent &Call,
if (!StreamThisRegion)
return false;
State =
- State->invalidateRegions({StreamThisRegion}, Call.getOriginExpr(),
+ State->invalidateRegions({StreamThisRegion}, Call.getCFGElementRef(),
C.blockCount(), C.getLocationContext(), false);
State =
State->BindExpr(Call.getOriginExpr(), C.getLocationContext(), StreamVal);
@@ -722,7 +724,7 @@ void SmartPtrModeling::handleGet(const CallEvent &Call,
SVal InnerPointerVal;
std::tie(InnerPointerVal, State) = retrieveOrConjureInnerPtrVal(
- State, ThisRegion, Call.getOriginExpr(), Call.getResultType(), C);
+ State, ThisRegion, Call.getCFGElementRef(), Call.getResultType(), C);
State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
InnerPointerVal);
// TODO: Add NoteTag, for how the raw pointer got using 'get' method.
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 17227a2..3c6c312 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -619,7 +619,7 @@ class StdLibraryFunctionsChecker
const Summary &Summary,
CheckerContext &C) const override {
return errno_modeling::setErrnoStdMustBeChecked(State, C,
- Call.getOriginExpr());
+ Call.getCFGElementRef());
}
const std::string describe(CheckerContext &C) const override {
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 80969ce..6481b76 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -224,18 +224,16 @@ SVal getStreamArg(const FnDescription *Desc, const CallEvent &Call) {
}
/// Create a conjured symbol return value for a call expression.
-DefinedSVal makeRetVal(CheckerContext &C, const CallExpr *CE) {
- assert(CE && "Expecting a call expression.");
-
- const LocationContext *LCtx = C.getLocationContext();
+DefinedSVal makeRetVal(CheckerContext &C, ConstCFGElementRef Elem) {
return C.getSValBuilder()
- .conjureSymbolVal(nullptr, CE, LCtx, C.blockCount())
+ .conjureSymbolVal(/*symbolTag=*/nullptr, Elem, C.getLocationContext(),
+ C.blockCount())
.castAs<DefinedSVal>();
}
ProgramStateRef bindAndAssumeTrue(ProgramStateRef State, CheckerContext &C,
- const CallExpr *CE) {
- DefinedSVal RetVal = makeRetVal(C, CE);
+ const CallExpr *CE, ConstCFGElementRef Elem) {
+ DefinedSVal RetVal = makeRetVal(C, Elem);
State = State->BindExpr(CE, C.getLocationContext(), RetVal);
State = State->assume(RetVal, true);
assert(State && "Assumption on new value should not fail.");
@@ -645,6 +643,7 @@ struct StreamOperationEvaluator {
SymbolRef StreamSym = nullptr;
const StreamState *SS = nullptr;
const CallExpr *CE = nullptr;
+ std::optional<ConstCFGElementRef> Elem;
StreamErrorState NewES;
StreamOperationEvaluator(CheckerContext &C)
@@ -664,6 +663,7 @@ struct StreamOperationEvaluator {
CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
if (!CE)
return false;
+ Elem = Call.getCFGElementRef();
assertStreamStateOpened(SS);
@@ -683,7 +683,7 @@ struct StreamOperationEvaluator {
}
ProgramStateRef makeAndBindRetVal(ProgramStateRef State, CheckerContext &C) {
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, Elem.value()).castAs<NonLoc>();
return State->BindExpr(CE, C.getLocationContext(), RetVal);
}
@@ -716,7 +716,7 @@ struct StreamOperationEvaluator {
ConstraintManager::ProgramStatePair
makeRetValAndAssumeDual(ProgramStateRef State, CheckerContext &C) {
- DefinedSVal RetVal = makeRetVal(C, CE);
+ DefinedSVal RetVal = makeRetVal(C, Elem.value());
State = State->BindExpr(CE, C.getLocationContext(), RetVal);
return C.getConstraintManager().assumeDual(State, RetVal);
}
@@ -858,7 +858,7 @@ escapeByStartIndexAndCount(ProgramStateRef State, const CallEvent &Call,
ITraits.setTrait(Element, DoNotInvalidateSuperRegion);
}
return State->invalidateRegions(
- EscapingVals, Call.getOriginExpr(), BlockCount, LCtx,
+ EscapingVals, Call.getCFGElementRef(), BlockCount, LCtx,
/*CausesPointerEscape=*/false,
/*InvalidatedSymbols=*/nullptr, &Call, &ITraits);
}
@@ -868,7 +868,7 @@ static ProgramStateRef escapeArgs(ProgramStateRef State, CheckerContext &C,
ArrayRef<unsigned int> EscapingArgs) {
auto GetArgSVal = [&Call](int Idx) { return Call.getArgSVal(Idx); };
auto EscapingVals = to_vector(map_range(EscapingArgs, GetArgSVal));
- State = State->invalidateRegions(EscapingVals, Call.getOriginExpr(),
+ State = State->invalidateRegions(EscapingVals, Call.getCFGElementRef(),
C.blockCount(), C.getLocationContext(),
/*CausesPointerEscape=*/false,
/*InvalidatedSymbols=*/nullptr);
@@ -931,7 +931,7 @@ void StreamChecker::evalFopen(const FnDescription *Desc, const CallEvent &Call,
if (!CE)
return;
- DefinedSVal RetVal = makeRetVal(C, CE);
+ DefinedSVal RetVal = makeRetVal(C, Call.getCFGElementRef());
SymbolRef RetSym = RetVal.getAsSymbol();
assert(RetSym && "RetVal must be a symbol here.");
@@ -1200,7 +1200,7 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
if (!IsFread && !PedanticMode)
return;
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
ProgramStateRef StateFailed =
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
StateFailed = E.assumeBinOpNN(StateFailed, BO_LT, RetVal, *NMembVal);
@@ -1235,7 +1235,7 @@ void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
State = escapeArgs(State, C, Call, {0});
if (SingleChar) {
// Generate a transition for the success state of `fgetc`.
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
// The returned 'unsigned char' of `fgetc` is converted to 'int',
@@ -1300,7 +1300,7 @@ void StreamChecker::evalFputx(const FnDescription *Desc, const CallEvent &Call,
C.addTransition(StateNotFailed);
} else {
// Generate a transition for the success state of `fputs`.
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
StateNotFailed =
@@ -1334,7 +1334,7 @@ void StreamChecker::evalFprintf(const FnDescription *Desc,
if (!E.Init(Desc, Call, C, State))
return;
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
State = State->BindExpr(E.CE, C.getLocationContext(), RetVal);
auto Cond =
E.SVB
@@ -1379,7 +1379,7 @@ void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
// case, and no error flags are set on the stream. This is probably not
// accurate, and the POSIX documentation does not tell more.
if (!E.isStreamEof()) {
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
StateNotFailed =
@@ -1460,7 +1460,7 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
State = escapeArgs(State, C, Call, {0, 1});
// Add transition for the successful state.
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
ProgramStateRef StateNotFailed = E.bindReturnValue(State, C, RetVal);
StateNotFailed =
E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
@@ -1601,7 +1601,7 @@ void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
if (!E.Init(Desc, Call, C, State))
return;
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
StateNotFailed =
@@ -1735,7 +1735,8 @@ void StreamChecker::evalFeofFerror(const FnDescription *Desc,
// Execution path with error of ErrorKind.
// Function returns true.
// From now on it is the only one error state.
- ProgramStateRef TrueState = bindAndAssumeTrue(State, C, E.CE);
+ ProgramStateRef TrueState =
+ bindAndAssumeTrue(State, C, E.CE, E.Elem.value());
C.addTransition(E.setStreamState(
TrueState, StreamState::getOpened(Desc, ErrorKind,
E.SS->FilePositionIndeterminate &&
@@ -1769,7 +1770,7 @@ void StreamChecker::evalFileno(const FnDescription *Desc, const CallEvent &Call,
if (!E.Init(Desc, Call, C, State))
return;
- NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.Elem.value()).castAs<NonLoc>();
State = State->BindExpr(E.CE, C.getLocationContext(), RetVal);
State = E.assumeBinOpNN(State, BO_GE, RetVal, E.getZeroVal(Call));
if (!State)
diff --git a/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 667b19f..77cec7d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -41,11 +41,8 @@ public:
}
bool operator<(const ZeroState &X) const {
- if (BlockID != X.BlockID)
- return BlockID < X.BlockID;
- if (SFC != X.SFC)
- return SFC < X.SFC;
- return ZeroSymbol < X.ZeroSymbol;
+ return std::tie(BlockID, SFC, ZeroSymbol) <
+ std::tie(X.BlockID, X.SFC, X.ZeroSymbol);
}
void Profile(llvm::FoldingSetNodeID &ID) const {
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index bb4a39f..583315f 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -280,7 +280,7 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
// Invalidate designated regions using the batch invalidation API.
// NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
// global variables.
- return Result->invalidateRegions(ValuesToInvalidate, getOriginExpr(),
+ return Result->invalidateRegions(ValuesToInvalidate, getCFGElementRef(),
BlockCount, getLocationContext(),
/*CausedByPointerEscape*/ true,
/*Symbols=*/nullptr, this, &ETraits);
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 40514cb..f71441a 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -422,7 +422,7 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
break;
case SubobjectAdjustment::MemberPointerAdjustment:
// FIXME: Unimplemented.
- State = State->invalidateRegions(Reg, InitWithAdjustments,
+ State = State->invalidateRegions(Reg, getCFGElementRef(),
currBldrCtx->blockCount(), LC, true,
nullptr, nullptr, nullptr);
return State;
@@ -439,8 +439,8 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
// values inside Reg would be correct.
SVal InitVal = State->getSVal(Init, LC);
if (InitVal.isUnknown()) {
- InitVal = getSValBuilder().conjureSymbolVal(Result, LC, Init->getType(),
- currBldrCtx->blockCount());
+ InitVal = getSValBuilder().conjureSymbolVal(
+ getCFGElementRef(), LC, Init->getType(), currBldrCtx->blockCount());
State = State->bindLoc(BaseReg.castAs<Loc>(), InitVal, LC, false);
// Then we'd need to take the value that certainly exists and bind it
@@ -449,7 +449,7 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
// Try to recover some path sensitivity in case we couldn't
// compute the value.
InitValWithAdjustments = getSValBuilder().conjureSymbolVal(
- Result, LC, InitWithAdjustments->getType(),
+ getCFGElementRef(), LC, InitWithAdjustments->getType(),
currBldrCtx->blockCount());
}
State =
@@ -1215,9 +1215,9 @@ void ExprEngine::ProcessInitializer(const CFGInitializer CFGInit,
// If we fail to get the value for some reason, use a symbolic value.
if (InitVal.isUnknownOrUndef()) {
SValBuilder &SVB = getSValBuilder();
- InitVal = SVB.conjureSymbolVal(BMI->getInit(), stackFrame,
- Field->getType(),
- currBldrCtx->blockCount());
+ InitVal =
+ SVB.conjureSymbolVal(getCFGElementRef(), stackFrame,
+ Field->getType(), currBldrCtx->blockCount());
}
} else {
InitVal = State->getSVal(BMI->getInit(), stackFrame);
@@ -2051,9 +2051,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
for (const auto N : preVisit) {
const LocationContext *LCtx = N->getLocationContext();
- SVal result = svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
- resultType,
- currBldrCtx->blockCount());
+ SVal result = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, getCFGElementRef(), LCtx, resultType,
+ currBldrCtx->blockCount());
ProgramStateRef State = N->getState()->BindExpr(Ex, LCtx, result);
// Escape pointers passed into the list, unless it's an ObjC boxed
@@ -2523,6 +2523,20 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
return true;
}
+/// Return the innermost location context which is inlined at `Node`, unless
+/// it's the top-level (entry point) location context.
+static const LocationContext *getInlinedLocationContext(ExplodedNode *Node,
+ ExplodedGraph &G) {
+ const LocationContext *CalleeLC = Node->getLocation().getLocationContext();
+ const LocationContext *RootLC =
+ (*G.roots_begin())->getLocation().getLocationContext();
+
+ if (CalleeLC->getStackFrame() == RootLC->getStackFrame())
+ return nullptr;
+
+ return CalleeLC;
+}
+
/// Block entrance. (Update counters).
void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
NodeBuilderWithSinks &nodeBuilder,
@@ -2556,10 +2570,19 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminatorStmt();
if (!isa_and_nonnull<ForStmt, WhileStmt, DoStmt, CXXForRangeStmt>(Term))
return;
+
// Widen.
const LocationContext *LCtx = Pred->getLocationContext();
+
+ // FIXME:
+ // We cannot use the CFG element obtained via `ExprEngine::getCFGElementRef`
+ // since we are currently at the block entrance and the current reference
+ // would be stale. Ideally, we should pass on the terminator of the CFG
+ // block, but the terminator cannot be referred to as a CFG element.
+ // Here we just pass the first CFG element in the block.
ProgramStateRef WidenedState =
- getWidenedLoopState(Pred->getState(), LCtx, BlockCount, Term);
+ getWidenedLoopState(Pred->getState(), LCtx, BlockCount,
+ *nodeBuilder.getContext().getBlock()->ref_begin());
nodeBuilder.generateNode(WidenedState, Pred);
return;
}
@@ -2570,21 +2593,24 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
const ExplodedNode *Sink =
nodeBuilder.generateSink(Pred->getState(), Pred, &tag);
- // Check if we stopped at the top level function or not.
- // Root node should have the location context of the top most function.
- const LocationContext *CalleeLC = Pred->getLocation().getLocationContext();
- const LocationContext *CalleeSF = CalleeLC->getStackFrame();
- const LocationContext *RootLC =
- (*G.roots_begin())->getLocation().getLocationContext();
- if (RootLC->getStackFrame() != CalleeSF) {
- Engine.FunctionSummaries->markReachedMaxBlockCount(CalleeSF->getDecl());
+ if (const LocationContext *LC = getInlinedLocationContext(Pred, G)) {
+ // FIXME: This will unconditionally prevent inlining this function (even
+ // from other entry points), which is not a reasonable heuristic: even if
+ // we reached max block count on this particular execution path, there
+ // may be other execution paths (especially with other parametrizations)
+ // where the analyzer can reach the end of the function (so there is no
+ // natural reason to avoid inlining it). However, disabling this would
+ // significantly increase the analysis time (because more entry points
+ // would exhaust their allocated budget), so it must be compensated by a
+ // different (more reasonable) reduction of analysis scope.
+ Engine.FunctionSummaries->markShouldNotInline(
+ LC->getStackFrame()->getDecl());
// Re-run the call evaluation without inlining it, by storing the
// no-inlining policy in the state and enqueuing the new work item on
// the list. Replay should almost never fail. Use the stats to catch it
// if it does.
- if ((!AMgr.options.NoRetryExhausted &&
- replayWithoutInlining(Pred, CalleeLC)))
+ if ((!AMgr.options.NoRetryExhausted && replayWithoutInlining(Pred, LC)))
return;
NumMaxBlockCountReachedInInlined++;
} else
@@ -2856,8 +2882,29 @@ void ExprEngine::processBranch(
// conflicts with the widen-loop analysis option (which is off by
// default). If we intend to support and stabilize the loop widening,
// we must ensure that it 'plays nicely' with this logic.
- if (!SkipTrueBranch || AMgr.options.ShouldWidenLoops)
+ if (!SkipTrueBranch || AMgr.options.ShouldWidenLoops) {
Builder.generateNode(StTrue, true, PredN);
+ } else if (!AMgr.options.InlineFunctionsWithAmbiguousLoops) {
+ // FIXME: There is an ancient and arbitrary heuristic in
+ // `ExprEngine::processCFGBlockEntrance` which prevents all further
+ // inlining of a function if it finds an execution path within that
+ // function which reaches the `MaxBlockVisitOnPath` limit (a/k/a
+ // `analyzer-max-loop`, by default four iterations in a loop). Adding
+ // this "don't assume third iteration" logic significantly increased
+ // the analysis runtime on some inputs because fewer functions were
+ // arbitrarily excluded from being inlined, so more entry points used
+ // up their full allocated budget. As a hacky compensation for this,
+ // here we apply the "should not inline" mark in cases when the loop
+ // could potentially reach the `MaxBlockVisitOnPath` limit without the
+ // "don't assume third iteration" logic. This slightly overcompensates
+ // (activates if the third iteration can be entered, and will not
+ // recognize cases where the fourth iteration wouldn't be completed), but
+ // should be good enough for practical purposes.
+ if (const LocationContext *LC = getInlinedLocationContext(Pred, G)) {
+ Engine.FunctionSummaries->markShouldNotInline(
+ LC->getStackFrame()->getDecl());
+ }
+ }
}
if (StFalse) {
@@ -3541,11 +3588,10 @@ void ExprEngine::VisitAtomicExpr(const AtomicExpr *AE, ExplodedNode *Pred,
ValuesToInvalidate.push_back(SubExprVal);
}
- State = State->invalidateRegions(ValuesToInvalidate, AE,
- currBldrCtx->blockCount(),
- LCtx,
- /*CausedByPointerEscape*/true,
- /*Symbols=*/nullptr);
+ State = State->invalidateRegions(ValuesToInvalidate, getCFGElementRef(),
+ currBldrCtx->blockCount(), LCtx,
+ /*CausedByPointerEscape*/ true,
+ /*Symbols=*/nullptr);
SVal ResultVal = UnknownVal();
State = State->BindExpr(AE, LCtx, ResultVal);
@@ -3892,7 +3938,8 @@ void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
assert(!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
if (std::optional<Loc> LV = X.getAs<Loc>())
- state = state->invalidateRegions(*LV, A, currBldrCtx->blockCount(),
+ state = state->invalidateRegions(*LV, getCFGElementRef(),
+ currBldrCtx->blockCount(),
Pred->getLocationContext(),
/*CausedByPointerEscape=*/true);
}
@@ -3902,7 +3949,8 @@ void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
SVal X = state->getSVal(I, Pred->getLocationContext());
if (std::optional<Loc> LV = X.getAs<Loc>())
- state = state->invalidateRegions(*LV, A, currBldrCtx->blockCount(),
+ state = state->invalidateRegions(*LV, getCFGElementRef(),
+ currBldrCtx->blockCount(),
Pred->getLocationContext(),
/*CausedByPointerEscape=*/true);
}
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 3d0a69a..6e52df5 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -21,18 +21,19 @@ using namespace ento;
using llvm::APSInt;
/// Optionally conjure and return a symbol for offset when processing
-/// an expression \p Expression.
+/// \p Elem.
/// If \p Other is a location, conjure a symbol for \p Symbol
/// (offset) if it is unknown so that memory arithmetic always
/// results in an ElementRegion.
/// \p Count The number of times the current basic block was visited.
-static SVal conjureOffsetSymbolOnLocation(
- SVal Symbol, SVal Other, Expr* Expression, SValBuilder &svalBuilder,
- unsigned Count, const LocationContext *LCtx) {
- QualType Ty = Expression->getType();
+static SVal conjureOffsetSymbolOnLocation(SVal Symbol, SVal Other,
+ ConstCFGElementRef Elem, QualType Ty,
+ SValBuilder &svalBuilder,
+ unsigned Count,
+ const LocationContext *LCtx) {
if (isa<Loc>(Other) && Ty->isIntegralOrEnumerationType() &&
Symbol.isUnknown()) {
- return svalBuilder.conjureSymbolVal(Expression, LCtx, Ty, Count);
+ return svalBuilder.conjureSymbolVal(Elem, LCtx, Ty, Count);
}
return Symbol;
}
@@ -65,7 +66,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// FIXME: Handle structs.
if (RightV.isUnknown()) {
unsigned Count = currBldrCtx->blockCount();
- RightV = svalBuilder.conjureSymbolVal(nullptr, B->getRHS(), LCtx,
+ RightV = svalBuilder.conjureSymbolVal(nullptr, getCFGElementRef(), LCtx,
Count);
}
// Simulate the effects of a "store": bind the value of the RHS
@@ -84,9 +85,11 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// SymSymExpr.
unsigned Count = currBldrCtx->blockCount();
RightV = conjureOffsetSymbolOnLocation(
- RightV, LeftV, RHS, svalBuilder, Count, LCtx);
- LeftV = conjureOffsetSymbolOnLocation(
- LeftV, RightV, LHS, svalBuilder, Count, LCtx);
+ RightV, LeftV, getCFGElementRef(), RHS->getType(), svalBuilder,
+ Count, LCtx);
+ LeftV = conjureOffsetSymbolOnLocation(LeftV, RightV, getCFGElementRef(),
+ LHS->getType(), svalBuilder,
+ Count, LCtx);
}
// Although we don't yet model pointers-to-members, we do need to make
@@ -165,7 +168,8 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
// LValue on the LHS will bind to.
- LHSVal = svalBuilder.conjureSymbolVal(nullptr, B->getRHS(), LCtx, LTy,
+ LHSVal = svalBuilder.conjureSymbolVal(/*symbolTag=*/nullptr,
+ getCFGElementRef(), LCtx, LTy,
currBldrCtx->blockCount());
// However, we need to convert the symbol to the computation type.
Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
@@ -459,9 +463,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
} else {
// If we don't know if the cast succeeded, conjure a new symbol.
if (val.isUnknown()) {
- DefinedOrUnknownSVal NewSym =
- svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
- currBldrCtx->blockCount());
+ DefinedOrUnknownSVal NewSym = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, getCFGElementRef(), LCtx, resultType,
+ currBldrCtx->blockCount());
state = state->BindExpr(CastE, LCtx, NewSym);
} else
// Else, bind to the derived region value.
@@ -483,9 +487,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
// Failed to cast or the result is unknown, fall back to conservative.
if (val.isUnknown()) {
- val =
- svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
- currBldrCtx->blockCount());
+ val = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, getCFGElementRef(), LCtx, resultType,
+ currBldrCtx->blockCount());
}
state = state->BindExpr(CastE, LCtx, val);
Bldr.generateNode(CastE, Pred, state);
@@ -529,7 +533,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
SVal result = svalBuilder.conjureSymbolVal(
- /*symbolTag=*/nullptr, CastE, LCtx, resultType,
+ /*symbolTag=*/nullptr, getCFGElementRef(), LCtx, resultType,
currBldrCtx->blockCount());
state = state->BindExpr(CastE, LCtx, result);
Bldr.generateNode(CastE, Pred, state);
@@ -621,8 +625,9 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
Ty = getContext().getPointerType(Ty);
}
- InitVal = svalBuilder.conjureSymbolVal(nullptr, InitEx, LC, Ty,
- currBldrCtx->blockCount());
+ InitVal = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, getCFGElementRef(), LC, Ty,
+ currBldrCtx->blockCount());
}
@@ -839,7 +844,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
}
if (!hasValue)
- V = svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
+ V = svalBuilder.conjureSymbolVal(nullptr, getCFGElementRef(), LCtx,
currBldrCtx->blockCount());
// Generate a new node with the binding from the appropriate path.
@@ -1121,9 +1126,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown()){
- DefinedOrUnknownSVal SymVal =
- svalBuilder.conjureSymbolVal(nullptr, U, LCtx,
- currBldrCtx->blockCount());
+ DefinedOrUnknownSVal SymVal = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, getCFGElementRef(), LCtx,
+ currBldrCtx->blockCount());
Result = SymVal;
// If the value is a location, ++/-- should always preserve
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index e07e24f..8535384 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -244,8 +244,8 @@ SVal ExprEngine::computeObjectUnderConstruction(
assert(RetE && "Void returns should not have a construction context");
QualType ReturnTy = RetE->getType();
QualType RegionTy = ACtx.getPointerType(ReturnTy);
- return SVB.conjureSymbolVal(&TopLevelSymRegionTag, RetE, SFC, RegionTy,
- currBldrCtx->blockCount());
+ return SVB.conjureSymbolVal(&TopLevelSymRegionTag, getCFGElementRef(),
+ SFC, RegionTy, currBldrCtx->blockCount());
}
llvm_unreachable("Unhandled return value construction context!");
}
@@ -981,10 +981,11 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// a custom global allocator.
if (symVal.isUnknown()) {
if (IsStandardGlobalOpNewFunction)
- symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
+ symVal = svalBuilder.getConjuredHeapSymbolVal(getCFGElementRef(), LCtx,
+ CNE->getType(), blockCount);
else
- symVal = svalBuilder.conjureSymbolVal(nullptr, CNE, LCtx, CNE->getType(),
- blockCount);
+ symVal = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, getCFGElementRef(), LCtx, blockCount);
}
CallEventManager &CEMgr = getStateManager().getCallEventManager();
@@ -1117,7 +1118,7 @@ void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
}
const LocationContext *LCtx = Pred->getLocationContext();
- SVal V = svalBuilder.conjureSymbolVal(CS, LCtx, VD->getType(),
+ SVal V = svalBuilder.conjureSymbolVal(getCFGElementRef(), LCtx, VD->getType(),
currBldrCtx->blockCount());
ProgramStateRef state = Pred->getState();
state = state->bindLoc(state->getLValue(VD, LCtx), V, LCtx);
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 1a44ba4..90625a9 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -746,6 +746,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
const LocationContext *LCtx,
ProgramStateRef State) {
const Expr *E = Call.getOriginExpr();
+ const ConstCFGElementRef &Elem = Call.getCFGElementRef();
if (!E)
return State;
@@ -788,7 +789,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
RegionAndSymbolInvalidationTraits ITraits;
ITraits.setTrait(TargetR,
RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
- State = State->invalidateRegions(TargetR, E, Count, LCtx,
+ State = State->invalidateRegions(TargetR, Elem, Count, LCtx,
/* CausesPointerEscape=*/false, nullptr,
&Call, &ITraits);
@@ -800,7 +801,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
// a regular unknown pointer.
const auto *CNE = dyn_cast<CXXNewExpr>(E);
if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
- R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
+ R = svalBuilder.getConjuredHeapSymbolVal(Elem, LCtx, E->getType(), Count);
const MemRegion *MR = R.getAsRegion()->StripCasts();
// Store the extent of the allocated object(s).
@@ -824,7 +825,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>());
} else {
- R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
+ R = svalBuilder.conjureSymbolVal(Elem, LCtx, ResultTy, Count);
}
}
return State->BindExpr(E, LCtx, R);
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index 9426e0a..f2e5a16 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -45,7 +45,7 @@ void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
/// for-loop iterator.
static void populateObjCForDestinationSet(
ExplodedNodeSet &dstLocation, SValBuilder &svalBuilder,
- const ObjCForCollectionStmt *S, const Stmt *elem, SVal elementV,
+ const ObjCForCollectionStmt *S, ConstCFGElementRef elem, SVal elementV,
SymbolManager &SymMgr, const NodeBuilderContext *currBldrCtx,
StmtNodeBuilder &Bldr, bool hasElements) {
@@ -66,8 +66,8 @@ static void populateObjCForDestinationSet(
SVal V;
if (hasElements) {
- SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T,
- currBldrCtx->blockCount());
+ SymbolRef Sym =
+ SymMgr.conjureSymbol(elem, LCtx, T, currBldrCtx->blockCount());
V = svalBuilder.makeLoc(Sym);
} else {
V = svalBuilder.makeIntVal(0, T);
@@ -110,6 +110,7 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
const Stmt *elem = S->getElement();
const Stmt *collection = S->getCollection();
+ const ConstCFGElementRef &elemRef = getCFGElementRef();
ProgramStateRef state = Pred->getState();
SVal collectionV = state->getSVal(collection, Pred->getLocationContext());
@@ -132,11 +133,12 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
StmtNodeBuilder Bldr(dstLocation, Tmp, *currBldrCtx);
if (!isContainerNull)
- populateObjCForDestinationSet(DstLocationSingleton, svalBuilder, S, elem,
- elementV, SymMgr, currBldrCtx, Bldr,
+ populateObjCForDestinationSet(DstLocationSingleton, svalBuilder, S,
+ elemRef, elementV, SymMgr, currBldrCtx,
+ Bldr,
/*hasElements=*/true);
- populateObjCForDestinationSet(DstLocationSingleton, svalBuilder, S, elem,
+ populateObjCForDestinationSet(DstLocationSingleton, svalBuilder, S, elemRef,
elementV, SymMgr, currBldrCtx, Bldr,
/*hasElements=*/false);
diff --git a/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp b/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 9e42801..2cddf1f 100644
--- a/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -13,10 +13,10 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
#include "clang/AST/AST.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
using namespace clang;
using namespace ento;
@@ -24,31 +24,13 @@ using namespace clang::ast_matchers;
const auto MatchRef = "matchref";
-/// Return the loops condition Stmt or NULL if LoopStmt is not a loop
-static const Expr *getLoopCondition(const Stmt *LoopStmt) {
- switch (LoopStmt->getStmtClass()) {
- default:
- return nullptr;
- case Stmt::ForStmtClass:
- return cast<ForStmt>(LoopStmt)->getCond();
- case Stmt::WhileStmtClass:
- return cast<WhileStmt>(LoopStmt)->getCond();
- case Stmt::DoStmtClass:
- return cast<DoStmt>(LoopStmt)->getCond();
- case Stmt::CXXForRangeStmtClass:
- return cast<CXXForRangeStmt>(LoopStmt)->getCond();
- }
-}
-
namespace clang {
namespace ento {
ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
const LocationContext *LCtx,
- unsigned BlockCount, const Stmt *LoopStmt) {
-
- assert((isa<ForStmt, WhileStmt, DoStmt, CXXForRangeStmt>(LoopStmt)));
-
+ unsigned BlockCount,
+ ConstCFGElementRef Elem) {
// Invalidate values in the current state.
// TODO Make this more conservative by only invalidating values that might
// be modified by the body of the loop.
@@ -93,9 +75,8 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
}
- return PrevState->invalidateRegions(Regions, getLoopCondition(LoopStmt),
- BlockCount, LCtx, true, nullptr, nullptr,
- &ITraits);
+ return PrevState->invalidateRegions(Regions, Elem, BlockCount, LCtx, true,
+ nullptr, nullptr, &ITraits);
}
} // end namespace ento
diff --git a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 492209d..19af899 100644
--- a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -149,7 +149,7 @@ typedef ArrayRef<const MemRegion *> RegionList;
typedef ArrayRef<SVal> ValueList;
ProgramStateRef ProgramState::invalidateRegions(
- RegionList Regions, const Stmt *S, unsigned Count,
+ RegionList Regions, ConstCFGElementRef Elem, unsigned Count,
const LocationContext *LCtx, bool CausedByPointerEscape,
InvalidatedSymbols *IS, const CallEvent *Call,
RegionAndSymbolInvalidationTraits *ITraits) const {
@@ -157,12 +157,12 @@ ProgramStateRef ProgramState::invalidateRegions(
for (const MemRegion *Reg : Regions)
Values.push_back(loc::MemRegionVal(Reg));
- return invalidateRegions(Values, S, Count, LCtx, CausedByPointerEscape, IS,
+ return invalidateRegions(Values, Elem, Count, LCtx, CausedByPointerEscape, IS,
Call, ITraits);
}
ProgramStateRef ProgramState::invalidateRegions(
- ValueList Values, const Stmt *S, unsigned Count,
+ ValueList Values, ConstCFGElementRef Elem, unsigned Count,
const LocationContext *LCtx, bool CausedByPointerEscape,
InvalidatedSymbols *IS, const CallEvent *Call,
RegionAndSymbolInvalidationTraits *ITraits) const {
@@ -181,7 +181,7 @@ ProgramStateRef ProgramState::invalidateRegions(
StoreManager::InvalidatedRegions TopLevelInvalidated;
StoreManager::InvalidatedRegions Invalidated;
const StoreRef &NewStore = Mgr.StoreMgr->invalidateRegions(
- getStore(), Values, S, Count, LCtx, Call, *IS, *ITraits,
+ getStore(), Values, Elem, Count, LCtx, Call, *IS, *ITraits,
&TopLevelInvalidated, &Invalidated);
ProgramStateRef NewState = makeWithStore(NewStore);
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 1cc9cb8..b692837 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -563,15 +563,15 @@ public:
//===-------------------------------------------------------------------===//
// Binding values to regions.
//===-------------------------------------------------------------------===//
- RegionBindingsRef invalidateGlobalRegion(MemRegion::Kind K, const Stmt *S,
- unsigned Count,
- const LocationContext *LCtx,
- RegionBindingsRef B,
- InvalidatedRegions *Invalidated);
-
- StoreRef invalidateRegions(Store store, ArrayRef<SVal> Values, const Stmt *S,
- unsigned Count, const LocationContext *LCtx,
- const CallEvent *Call, InvalidatedSymbols &IS,
+ RegionBindingsRef
+ invalidateGlobalRegion(MemRegion::Kind K, ConstCFGElementRef Elem,
+ unsigned Count, const LocationContext *LCtx,
+ RegionBindingsRef B, InvalidatedRegions *Invalidated);
+
+ StoreRef invalidateRegions(Store store, ArrayRef<SVal> Values,
+ ConstCFGElementRef Elem, unsigned Count,
+ const LocationContext *LCtx, const CallEvent *Call,
+ InvalidatedSymbols &IS,
RegionAndSymbolInvalidationTraits &ITraits,
InvalidatedRegions *Invalidated,
InvalidatedRegions *InvalidatedTopLevel) override;
@@ -1147,7 +1147,7 @@ RegionStoreManager::removeSubRegionBindings(LimitedRegionBindingsConstRef B,
namespace {
class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
{
- const Stmt *S;
+ ConstCFGElementRef Elem;
unsigned Count;
const LocationContext *LCtx;
InvalidatedSymbols &IS;
@@ -1156,12 +1156,13 @@ class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
GlobalsFilterKind GlobalsFilter;
public:
InvalidateRegionsWorker(RegionStoreManager &rm, ProgramStateManager &stateMgr,
- RegionBindingsRef b, const Stmt *S, unsigned count,
- const LocationContext *lctx, InvalidatedSymbols &is,
+ RegionBindingsRef b, ConstCFGElementRef elem,
+ unsigned count, const LocationContext *lctx,
+ InvalidatedSymbols &is,
RegionAndSymbolInvalidationTraits &ITraitsIn,
StoreManager::InvalidatedRegions *r,
GlobalsFilterKind GFK)
- : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b), S(S),
+ : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b), Elem(elem),
Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r),
GlobalsFilter(GFK) {}
@@ -1296,7 +1297,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
- svalBuilder.conjureSymbolVal(baseR, S, LCtx, Ctx.IntTy, Count);
+ svalBuilder.conjureSymbolVal(baseR, Elem, LCtx, Ctx.IntTy, Count);
B = B.addBinding(baseR, BindingKey::Default, V);
return;
}
@@ -1318,7 +1319,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
- svalBuilder.conjureSymbolVal(baseR, S, LCtx, Ctx.IntTy, Count);
+ svalBuilder.conjureSymbolVal(baseR, Elem, LCtx, Ctx.IntTy, Count);
B = B.addBinding(baseR, BindingKey::Default, V);
return;
}
@@ -1386,13 +1387,13 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
conjure_default:
// Set the default value of the array to conjured symbol.
DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(
- baseR, S, LCtx, AT->getElementType(), Count);
+ baseR, Elem, LCtx, AT->getElementType(), Count);
B = B.addBinding(baseR, BindingKey::Default, V);
return;
}
DefinedOrUnknownSVal V =
- svalBuilder.conjureSymbolVal(baseR, S, LCtx, T, Count);
+ svalBuilder.conjureSymbolVal(baseR, Elem, LCtx, T, Count);
assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
B = B.addBinding(baseR, BindingKey::Direct, V);
}
@@ -1421,15 +1422,15 @@ bool InvalidateRegionsWorker::includeEntireMemorySpace(const MemRegion *Base) {
}
RegionBindingsRef RegionStoreManager::invalidateGlobalRegion(
- MemRegion::Kind K, const Stmt *S, unsigned Count,
+ MemRegion::Kind K, ConstCFGElementRef Elem, unsigned Count,
const LocationContext *LCtx, RegionBindingsRef B,
InvalidatedRegions *Invalidated) {
// Bind the globals memory space to a new symbol that we will use to derive
// the bindings for all globals.
const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
- SVal V =
- svalBuilder.conjureSymbolVal(/* symbolTag = */ (const void *)GS, S, LCtx,
- /* type does not matter */ Ctx.IntTy, Count);
+ SVal V = svalBuilder.conjureSymbolVal(
+ /* symbolTag = */ (const void *)GS, Elem, LCtx,
+ /* type does not matter */ Ctx.IntTy, Count);
B = B.removeBinding(GS)
.addBinding(BindingKey::Make(GS, BindingKey::Default), V);
@@ -1464,7 +1465,7 @@ void RegionStoreManager::populateWorkList(InvalidateRegionsWorker &W,
}
StoreRef RegionStoreManager::invalidateRegions(
- Store store, ArrayRef<SVal> Values, const Stmt *S, unsigned Count,
+ Store store, ArrayRef<SVal> Values, ConstCFGElementRef Elem, unsigned Count,
const LocationContext *LCtx, const CallEvent *Call, InvalidatedSymbols &IS,
RegionAndSymbolInvalidationTraits &ITraits,
InvalidatedRegions *TopLevelRegions, InvalidatedRegions *Invalidated) {
@@ -1479,7 +1480,7 @@ StoreRef RegionStoreManager::invalidateRegions(
}
RegionBindingsRef B = getRegionBindings(store);
- InvalidateRegionsWorker W(*this, StateMgr, B, S, Count, LCtx, IS, ITraits,
+ InvalidateRegionsWorker W(*this, StateMgr, B, Elem, Count, LCtx, IS, ITraits,
Invalidated, GlobalsFilter);
// Scan the bindings and generate the clusters.
@@ -1499,12 +1500,12 @@ StoreRef RegionStoreManager::invalidateRegions(
// TODO: This could possibly be more precise with modules.
switch (GlobalsFilter) {
case GFK_All:
- B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind, S,
+ B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind, Elem,
Count, LCtx, B, Invalidated);
[[fallthrough]];
case GFK_SystemOnly:
- B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind, S, Count,
- LCtx, B, Invalidated);
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind, Elem,
+ Count, LCtx, B, Invalidated);
[[fallthrough]];
case GFK_None:
break;
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 9e0800b..55ac94a 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -153,9 +153,11 @@ SValBuilder::getRegionValueSymbolVal(const TypedValueRegion *region) {
}
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
- const Expr *Ex,
+ ConstCFGElementRef elem,
const LocationContext *LCtx,
unsigned Count) {
+ const Expr *Ex = dyn_cast<Expr>(elem->getAs<CFGStmt>()->getStmt());
+ assert(Ex && "elem must be a CFGStmt containing an Expr");
QualType T = Ex->getType();
if (T->isNullPtrType())
@@ -167,11 +169,11 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
if (Ex->isGLValue())
T = LCtx->getAnalysisDeclContext()->getASTContext().getPointerType(ExType);
- return conjureSymbolVal(SymbolTag, Ex, LCtx, T, Count);
+ return conjureSymbolVal(SymbolTag, elem, LCtx, T, Count);
}
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
- const Stmt *St,
+ ConstCFGElementRef elem,
const LocationContext *LCtx,
QualType type,
unsigned count) {
@@ -181,7 +183,7 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
- SymbolRef sym = SymMgr.conjureSymbol(St, LCtx, type, count, symbolTag);
+ SymbolRef sym = SymMgr.conjureSymbol(elem, LCtx, type, count, symbolTag);
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
@@ -189,36 +191,30 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
return nonloc::SymbolVal(sym);
}
-DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt,
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(ConstCFGElementRef elem,
const LocationContext *LCtx,
QualType type,
unsigned visitCount) {
- return conjureSymbolVal(/*symbolTag=*/nullptr, stmt, LCtx, type, visitCount);
+ return conjureSymbolVal(/*symbolTag=*/nullptr, elem, LCtx, type, visitCount);
}
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const CallEvent &call,
unsigned visitCount,
const void *symbolTag) {
- return conjureSymbolVal(symbolTag, call.getOriginExpr(),
- call.getLocationContext(), visitCount);
+ return conjureSymbolVal(symbolTag, call.getCFGElementRef(),
+ call.getLocationContext(), call.getResultType(),
+ visitCount);
}
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const CallEvent &call,
QualType type,
unsigned visitCount,
const void *symbolTag) {
- return conjureSymbolVal(symbolTag, call.getOriginExpr(),
+ return conjureSymbolVal(symbolTag, call.getCFGElementRef(),
call.getLocationContext(), type, visitCount);
}
-DefinedSVal SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
- const LocationContext *LCtx,
- unsigned VisitCount) {
- QualType T = E->getType();
- return getConjuredHeapSymbolVal(E, LCtx, T, VisitCount);
-}
-
-DefinedSVal SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
+DefinedSVal SValBuilder::getConjuredHeapSymbolVal(ConstCFGElementRef elem,
const LocationContext *LCtx,
QualType type,
unsigned VisitCount) {
@@ -230,7 +226,7 @@ DefinedSVal SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
return makeZeroVal(type).castAs<DefinedSVal>();
}
- SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, type, VisitCount);
+ SymbolRef sym = SymMgr.conjureSymbol(elem, LCtx, type, VisitCount);
return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
}
diff --git a/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index a4648f5..a6ade66 100644
--- a/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -82,7 +82,7 @@ void UnarySymExpr::dumpToStream(raw_ostream &os) const {
void SymbolConjured::dumpToStream(raw_ostream &os) const {
os << getKindStr() << getSymbolID() << '{' << T << ", LC" << LCtx->getID();
- if (S)
+ if (auto *S = getStmt())
os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
else
os << ", no stmt";
diff --git a/clang/test/AST/HLSL/RootSignatures-AST.hlsl b/clang/test/AST/HLSL/RootSignatures-AST.hlsl
new file mode 100644
index 0000000..c700174
--- /dev/null
+++ b/clang/test/AST/HLSL/RootSignatures-AST.hlsl
@@ -0,0 +1,75 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
+// RUN: -disable-llvm-passes -o - %s | FileCheck %s
+
+// This test ensures that the sample root signature is parsed without error and
+// the Attr AST Node is created successfully. If an invalid root signature was
+// passed in then we would exit out of Sema before the Attr is created.
+
+#define SampleRS \
+ "DescriptorTable( " \
+ " CBV(b1), " \
+ " SRV(t1, numDescriptors = 8, " \
+ " flags = DESCRIPTORS_VOLATILE), " \
+ " UAV(u1, numDescriptors = 0, " \
+ " flags = DESCRIPTORS_VOLATILE) " \
+ "), " \
+ "DescriptorTable(Sampler(s0, numDescriptors = 4, space = 1))"
+
+// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[SAMPLE_RS_DECL:__hlsl_rootsig_decl_\d*]]
+// CHECK-SAME: RootElements{
+// CHECK-SAME: CBV(b1, numDescriptors = 1, space = 0,
+// CHECK-SAME: offset = DescriptorTableOffsetAppend, flags = DataStaticWhileSetAtExecute),
+// CHECK-SAME: SRV(t1, numDescriptors = 8, space = 0,
+// CHECK-SAME: offset = DescriptorTableOffsetAppend, flags = DescriptorsVolatile),
+// CHECK-SAME: UAV(u1, numDescriptors = 0, space = 0,
+// CHECK-SAME: offset = DescriptorTableOffsetAppend, flags = DescriptorsVolatile),
+// CHECK-SAME: DescriptorTable(numClauses = 3, visibility = All),
+// CHECK-SAME: Sampler(s0, numDescriptors = 4, space = 1,
+// CHECK-SAME: offset = DescriptorTableOffsetAppend, flags = None),
+// CHECK-SAME: DescriptorTable(numClauses = 1, visibility = All)
+// CHECK-SAME: }
+
+// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[SAMPLE_RS_DECL]]
+[RootSignature(SampleRS)]
+void rs_main() {}
+
+// Ensure that if multiple root signatures are specified at different entry
+// points that we point to the correct root signature
+
+// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[SAMPLE_RS_DECL]]
+[RootSignature(SampleRS)]
+void same_rs_main() {}
+
+// Define the same root signature to ensure that the entry point will still
+// link to the same root signature declaration
+
+#define SampleSameRS \
+ "DescriptorTable( " \
+ " CBV(b1), " \
+ " SRV(t1, numDescriptors = 8, " \
+ " flags = DESCRIPTORS_VOLATILE), " \
+ " UAV(u1, numDescriptors = 0, " \
+ " flags = DESCRIPTORS_VOLATILE) " \
+ "), " \
+ "DescriptorTable(Sampler(s0, numDescriptors = 4, space = 1))"
+
+// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[SAMPLE_RS_DECL]]
+[RootSignature(SampleSameRS)]
+void same_rs_string_main() {}
+
+#define SampleDifferentRS \
+ "DescriptorTable(Sampler(s0, numDescriptors = 4, space = 1))"
+
+// Ensure that when we define a different type root signature that it creates
+// a separate decl and identifier to reference
+
+// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[DIFF_RS_DECL:__hlsl_rootsig_decl_\d*]]
+// CHECK-SAME: RootElements{
+// CHECK-SAME: Sampler(s0, numDescriptors = 4, space = 1,
+// CHECK-SAME: offset = DescriptorTableOffsetAppend, flags = None),
+// CHECK-SAME: DescriptorTable(numClauses = 1, visibility = All)
+// CHECK-SAME: }
+
+// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]]
+[RootSignature(SampleDifferentRS)]
+void different_rs_string_main() {}
diff --git a/clang/test/Analysis/PR57270.cpp b/clang/test/Analysis/PR57270.cpp
new file mode 100644
index 0000000..7d7a658
--- /dev/null
+++ b/clang/test/Analysis/PR57270.cpp
@@ -0,0 +1,30 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=debug.ExprInspection -verify %s
+
+using size_t = __typeof(sizeof(int));
+
+void clang_analyzer_explain(int);
+void clang_analyzer_dump(int);
+void *memset(void *, int, size_t);
+
+struct S
+{
+ static int a;
+ ~S(){};
+};
+
+int S::a = 0;
+
+void foo()
+{
+ S::a = 0;
+
+ int x = 3;
+ memset(&x, 1, sizeof(x));
+
+ S *arr = new S[x];
+ delete[] arr;
+
+ clang_analyzer_dump(S::a); // expected-warning-re{{{{derived_\$[0-9]+{conj_\$[0-9]+{int, LC[0-9]+, S[0-9]+, #[0-9]+},a}}}}}
+
+ clang_analyzer_explain(S::a); // expected-warning-re{{{{value derived from \(symbol of type 'int' conjured at CFG element '->~S\(\) \(Implicit destructor\)'\) for global variable 'S::a'}}}}
+}
diff --git a/clang/test/Analysis/analyzer-config.c b/clang/test/Analysis/analyzer-config.c
index 80cad54..7936273 100644
--- a/clang/test/Analysis/analyzer-config.c
+++ b/clang/test/Analysis/analyzer-config.c
@@ -89,6 +89,7 @@
// CHECK-NEXT: graph-trim-interval = 1000
// CHECK-NEXT: ignore-bison-generated-files = true
// CHECK-NEXT: ignore-flex-generated-files = true
+// CHECK-NEXT: inline-functions-with-ambiguous-loops = false
// CHECK-NEXT: inline-lambdas = true
// CHECK-NEXT: ipa = dynamic-bifurcate
// CHECK-NEXT: ipa-always-inline-size = 3
diff --git a/clang/test/Analysis/container-modeling.cpp b/clang/test/Analysis/container-modeling.cpp
index bf4a12a..5dcb627 100644
--- a/clang/test/Analysis/container-modeling.cpp
+++ b/clang/test/Analysis/container-modeling.cpp
@@ -196,7 +196,7 @@ void pop_front(std::list<int> &L, int n) {
void push_back() {
std::vector<int> V;
V.end();
-
+
clang_analyzer_denote(clang_analyzer_container_end(V), "$V.end()");
V.push_back(1); // expected-note{{Container 'V' extended to the back by 1 position}}
@@ -256,7 +256,7 @@ void print_state(std::vector<int> &V) {
V.cend();
clang_analyzer_printState();
-
+
// CHECK: "checker_messages": [
// CHECK-NEXT: { "checker": "alpha.cplusplus.ContainerModeling", "messages": [
// CHECK-NEXT: "Container Data :",
diff --git a/clang/test/Analysis/dump_egraph.cpp b/clang/test/Analysis/dump_egraph.cpp
index 1345969..2cea5f7 100644
--- a/clang/test/Analysis/dump_egraph.cpp
+++ b/clang/test/Analysis/dump_egraph.cpp
@@ -24,4 +24,3 @@ void foo() {
// CHECK: \"cluster\": \"t\", \"pointer\": \"{{0x[0-9a-f]+}}\", \"items\": [\l&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\{ \"kind\": \"Default\", \"offset\": 0, \"value\": \"conj_$3\{int, LC5, no stmt, #1\}\"
// CHECK: \"dynamic_types\": [\l&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\{ \"region\": \"HeapSymRegion\{conj_$1\{S *, LC1, S{{[0-9]+}}, #1\}\}\", \"dyn_type\": \"S\", \"sub_classable\": false \}\l
-
diff --git a/clang/test/Analysis/explain-svals.cpp b/clang/test/Analysis/explain-svals.cpp
index d1615e6..267980c 100644
--- a/clang/test/Analysis/explain-svals.cpp
+++ b/clang/test/Analysis/explain-svals.cpp
@@ -47,12 +47,12 @@ void test_1(int param, void *ptr) {
void test_2(char *ptr, int ext) {
clang_analyzer_explain((void *) "asdf"); // expected-warning-re{{{{^pointer to element of type 'char' with index 0 of string literal "asdf"$}}}}
clang_analyzer_explain(strlen(ptr)); // expected-warning-re{{{{^metadata of type 'unsigned long' tied to pointee of argument 'ptr'$}}}}
- clang_analyzer_explain(conjure()); // expected-warning-re{{{{^symbol of type 'int' conjured at statement 'conjure\(\)'$}}}}
- clang_analyzer_explain(glob); // expected-warning-re{{{{^value derived from \(symbol of type 'int' conjured at statement 'conjure\(\)'\) for global variable 'glob'$}}}}
- clang_analyzer_explain(glob_ptr); // expected-warning-re{{{{^value derived from \(symbol of type 'int' conjured at statement 'conjure\(\)'\) for global variable 'glob_ptr'$}}}}
+ clang_analyzer_explain(conjure()); // expected-warning-re{{{{^symbol of type 'int' conjured at CFG element 'conjure\(\)'$}}}}
+ clang_analyzer_explain(glob); // expected-warning-re{{{{^value derived from \(symbol of type 'int' conjured at CFG element 'conjure\(\)'\) for global variable 'glob'$}}}}
+ clang_analyzer_explain(glob_ptr); // expected-warning-re{{{{^value derived from \(symbol of type 'int' conjured at CFG element 'conjure\(\)'\) for global variable 'glob_ptr'$}}}}
clang_analyzer_explain(clang_analyzer_getExtent(ptr)); // expected-warning-re{{{{^extent of pointee of argument 'ptr'$}}}}
int *x = new int[ext];
- clang_analyzer_explain(x); // expected-warning-re{{{{^pointer to element of type 'int' with index 0 of heap segment that starts at symbol of type 'int \*' conjured at statement 'new int \[ext\]'$}}}}
+ clang_analyzer_explain(x); // expected-warning-re{{{{^pointer to element of type 'int' with index 0 of heap segment that starts at symbol of type 'int \*' conjured at CFG element 'CFGNewAllocator\(int \*\)'$}}}}
// Sic! What gets computed is the extent of the element-region.
clang_analyzer_explain(clang_analyzer_getExtent(x)); // expected-warning-re{{{{^\(argument 'ext'\) \* 4$}}}}
delete[] x;
@@ -99,8 +99,8 @@ public:
} // end of anonymous namespace
void test_6() {
- clang_analyzer_explain(conjure_S()); // expected-warning-re{{{{^symbol of type 'int' conjured at statement 'conjure_S\(\)'$}}}}
- clang_analyzer_explain(conjure_S().z); // expected-warning-re{{{{^value derived from \(symbol of type 'int' conjured at statement 'conjure_S\(\)'\) for field 'z' of temporary object constructed at statement 'conjure_S\(\)'$}}}}
+ clang_analyzer_explain(conjure_S()); // expected-warning-re{{{{^symbol of type 'int' conjured at CFG element 'conjure_S\(\) \(CXXRecordTypedCall, \+0\)'$}}}}
+ clang_analyzer_explain(conjure_S().z); // expected-warning-re{{{{^value derived from \(symbol of type 'int' conjured at CFG element 'conjure_S\(\) \(CXXRecordTypedCall, \)'\) for field 'z' of temporary object constructed at statement 'conjure_S\(\)'$}}}}
}
class C_top_level {
diff --git a/clang/test/Analysis/explain-svals.m b/clang/test/Analysis/explain-svals.m
index e93258b..e79ceab 100644
--- a/clang/test/Analysis/explain-svals.m
+++ b/clang/test/Analysis/explain-svals.m
@@ -17,8 +17,8 @@ void test_1(Object *p) {
clang_analyzer_explain(p); // expected-warning-re{{{{^argument 'p'$}}}}
clang_analyzer_explain(p->x); // expected-warning-re{{{{^initial value of instance variable 'x' of object at argument 'p'$}}}}
Object *q = [[Object alloc] init];
- clang_analyzer_explain(q); // expected-warning-re{{{{^symbol of type 'Object \*' conjured at statement '\[\[Object alloc\] init\]'$}}}}
- clang_analyzer_explain(q->x); // expected-warning-re{{{{^initial value of instance variable 'x' of object at symbol of type 'Object \*' conjured at statement '\[\[Object alloc\] init\]'$}}}}
+ clang_analyzer_explain(q); // expected-warning-re{{{{^symbol of type 'Object \*' conjured at CFG element '\[\[Object alloc\] init\]'$}}}}
+ clang_analyzer_explain(q->x); // expected-warning-re{{{{^initial value of instance variable 'x' of object at symbol of type 'Object \*' conjured at CFG element '\[\[Object alloc\] init\]'$}}}}
}
void test_2(void) {
diff --git a/clang/test/Analysis/loop-based-inlining-prevention.c b/clang/test/Analysis/loop-based-inlining-prevention.c
new file mode 100644
index 0000000..7362711
--- /dev/null
+++ b/clang/test/Analysis/loop-based-inlining-prevention.c
@@ -0,0 +1,200 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -verify=expected,default %s
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config inline-functions-with-ambiguous-loops=true -verify=expected,enabled %s
+
+// This file tests some heuristics in the engine that put functions on a
+// "do not inline" list if their analysis reaches the `analyzer-max-loop`
+// limit (by default 4 iterations) in a loop. This was almost surely intended
+// as memoization optimization for the "retry without inlining" fallback (if we
+// had to retry once, next time don't even try inlining), but aggressively
+// oversteps the "natural" scope: reaching 4 iterations on _one particular_
+// execution path does not imply that each path would need "retry without
+// inlining" especially if a different call receives different arguments.
+//
+// This heuristic significantly affects the scope/depth of the analysis (and
+// therefore the execution time) because without this limitation on the
+// inlining significantly more entry points would be able to exhaust their
+// `max-nodes` quota. (Trivial thin wrappers around big complex functions are
+// common in many projects.)
+//
+// Unfortunately, this arbitrary heuristic strongly relies on the current loop
+// handling model and its many limitations, so improvements in loop handling
+// can cause surprising slowdowns by reducing the "do not inline" blacklist.
+// In the tests "FIXME-BUT-NEEDED" comments mark "problematic" (aka buggy)
+// analyzer behavior which cannot be fixed without also improving the
+// heuristics for (not) inlining large functions.
+
+ int getNum(void); // Get an unknown symbolic number.
+
+void clang_analyzer_dump(int arg);
+
+//-----------------------------------------------------------------------------
+// Simple case: inlined function never reaches `analyzer-max-loop`, so it is
+// always inlined.
+
+int inner_simple(int callIdx) {
+ clang_analyzer_dump(callIdx); // expected-warning {{1 S32}}
+ // expected-warning@-1 {{2 S32}}
+ return 42;
+}
+
+int outer_simple(void) {
+ int x = inner_simple(1);
+ int y = inner_simple(2);
+ return 53 / (x - y); // expected-warning {{Division by zero}}
+}
+
+//-----------------------------------------------------------------------------
+// Inlined function always reaches `analyzer-max-loop`, which stops the
+// analysis on that path and puts the function on the "do not inline" list.
+
+int inner_fixed_loop_1(int callIdx) {
+ int i;
+ clang_analyzer_dump(callIdx); // expected-warning {{1 S32}}
+ for (i = 0; i < 10; i++); // FIXME-BUT-NEEDED: This stops the analysis.
+ clang_analyzer_dump(callIdx); // no-warning
+ return 42;
+}
+
+int outer_fixed_loop_1(void) {
+ int x = inner_fixed_loop_1(1);
+ int y = inner_fixed_loop_1(2);
+
+ // FIXME-BUT-NEEDED: The analysis doesn't reach this zero division.
+ return 53 / (x - y); // no-warning
+}
+
+//-----------------------------------------------------------------------------
+// Inlined function always reaches `analyzer-max-loop`; inlining is prevented
+// even for different entry points.
+// NOTE: the analyzer happens to analyze the entry points in a reversed order,
+// so `outer_2_fixed_loop_2` is analyzed first and it will be the one which is
+// able to inline the inner function.
+
+int inner_fixed_loop_2(int callIdx) {
+ // Identical copy of inner_fixed_loop_1.
+ int i;
+ clang_analyzer_dump(callIdx); // expected-warning {{2 S32}}
+ for (i = 0; i < 10; i++); // FIXME-BUT-NEEDED: This stops the analysis.
+ clang_analyzer_dump(callIdx); // no-warning
+ return 42;
+}
+
+int outer_1_fixed_loop_2(void) {
+ return inner_fixed_loop_2(1);
+}
+
+int outer_2_fixed_loop_2(void) {
+ return inner_fixed_loop_2(2);
+}
+
+//-----------------------------------------------------------------------------
+// Inlined function reaches `analyzer-max-loop` only in its second call. The
+// function is inlined twice but the second call doesn't finish and ends up
+// being conservatively evaluated.
+
+int inner_parametrized_loop_1(int count) {
+ int i;
+ clang_analyzer_dump(count); // expected-warning {{2 S32}}
+ // expected-warning@-1 {{10 S32}}
+ for (i = 0; i < count; i++);
+ // FIXME-BUT-NEEDED: This loop stops the analysis when count >=4.
+ clang_analyzer_dump(count); // expected-warning {{2 S32}}
+ return 42;
+}
+
+int outer_parametrized_loop_1(void) {
+ int x = inner_parametrized_loop_1(2);
+ int y = inner_parametrized_loop_1(10);
+
+ // FIXME-BUT-NEEDED: The analysis doesn't reach this zero division.
+ return 53 / (x - y); // no-warning
+}
+
+//-----------------------------------------------------------------------------
+// Inlined function reaches `analyzer-max-loop` on its first call, so the
+// second call isn't inlined (although it could be fully evaluated).
+
+int inner_parametrized_loop_2(int count) {
+ // Identical copy of inner_parametrized_loop_1.
+ int i;
+ clang_analyzer_dump(count); // expected-warning {{10 S32}}
+ for (i = 0; i < count; i++);
+ // FIXME-BUT-NEEDED: This loop stops the analysis when count >=4.
+ clang_analyzer_dump(count); // no-warning
+ return 42;
+}
+
+int outer_parametrized_loop_2(void) {
+ int y = inner_parametrized_loop_2(10);
+ int x = inner_parametrized_loop_2(2);
+
+ // FIXME-BUT-NEEDED: The analysis doesn't reach this zero division.
+ return 53 / (x - y); // no-warning
+}
+
+//-----------------------------------------------------------------------------
+// Inlined function may or may not reach `analyzer-max-loop` depending on an
+// ambiguous check before the loop. This is very similar to the "fixed loop"
+// cases: the function is placed on the "don't inline" list when any execution
+// path reaches `analyzer-max-loop` (even if other execution paths reach the
+// end of the function).
+// NOTE: This is tested with two separate entry points to ensure that one
+// inlined call is fully evaluated before we try to inline the other call.
+// NOTE: the analyzer happens to analyze the entry points in a reversed order,
+// so `outer_2_conditional_loop` is analyzed first and it will be the one which
+// is able to inline the inner function.
+
+int inner_conditional_loop(int callIdx) {
+ int i;
+ clang_analyzer_dump(callIdx); // expected-warning {{2 S32}}
+ if (getNum() == 777) {
+ for (i = 0; i < 10; i++);
+ }
+ clang_analyzer_dump(callIdx); // expected-warning {{2 S32}}
+ return 42;
+}
+
+int outer_1_conditional_loop(void) {
+ return inner_conditional_loop(1);
+}
+
+int outer_2_conditional_loop(void) {
+ return inner_conditional_loop(2);
+}
+
+//-----------------------------------------------------------------------------
+// Inlined function executes an ambiguous loop that may or may not reach
+// `analyzer-max-loop`. Historically, before the "don't assume third iteration"
+// commit (bb27d5e5c6b194a1440b8ac4e5ace68d0ee2a849) this worked like the
+// `conditional_loop` cases: the analyzer was able to find a path reaching
+// `analyzer-max-loop` so inlining was disabled. After that commit the analyzer
+// does not _assume_ a third (or later) iteration (i.e. does not enter those
+// iterations if the loop condition is an unknown value), so e.g. this test
+// function does not reach `analyzer-max-loop` iterations and the inlining is
+// not disabled.
+// Unfortunately this change significantly increased the workload and
+// runtime of the analyzer (more entry points used up their budget), so the
+// option `inline-functions-with-ambiguous-loops` was introduced and disabled
+// by default to suppress the inlining in situations where the "don't assume
+// third iteration" logic activates.
+// NOTE: This is tested with two separate entry points to ensure that one
+// inlined call is fully evaluated before we try to inline the other call.
+// NOTE: the analyzer happens to analyze the entry points in a reversed order,
+// so `outer_2_ambiguous_loop` is analyzed first and it will be the one which
+// is able to inline the inner function.
+
+int inner_ambiguous_loop(int callIdx) {
+ int i;
+ clang_analyzer_dump(callIdx); // default-warning {{2 S32}}
+ // enabled-warning@-1 {{1 S32}}
+ // enabled-warning@-2 {{2 S32}}
+ for (i = 0; i < getNum(); i++);
+ return i;
+}
+
+int outer_1_ambiguous_loop(void) {
+ return inner_ambiguous_loop(1);
+}
+int outer_2_ambiguous_loop(void) {
+ return inner_ambiguous_loop(2);
+}
diff --git a/clang/test/Analysis/loop-unrolling.cpp b/clang/test/Analysis/loop-unrolling.cpp
index bf05a77..ebae81e 100644
--- a/clang/test/Analysis/loop-unrolling.cpp
+++ b/clang/test/Analysis/loop-unrolling.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config unroll-loops=true,cfg-loopexit=true -verify -std=c++14 -analyzer-config exploration_strategy=unexplored_first_queue %s
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config unroll-loops=true,cfg-loopexit=true,exploration_strategy=dfs -verify -std=c++14 -DDFS=1 %s
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config unroll-loops=true,cfg-loopexit=true -verify=expected,default -std=c++14 -analyzer-config exploration_strategy=unexplored_first_queue %s
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -analyzer-config unroll-loops=true,cfg-loopexit=true,exploration_strategy=dfs -verify=expected,dfs -std=c++14 %s
void clang_analyzer_numTimesReached();
void clang_analyzer_warnIfReached();
@@ -337,6 +337,7 @@ int nested_both_unrolled() {
}
int simple_known_bound_loop() {
+ // Iteration count visible: can be unrolled and fully executed.
for (int i = 2; i < 12; i++) {
// This function is inlined in nested_inlined_unroll1()
clang_analyzer_numTimesReached(); // expected-warning {{90}}
@@ -345,27 +346,42 @@ int simple_known_bound_loop() {
}
int simple_unknown_bound_loop() {
+ // Iteration count unknown: unrolling won't happen and the execution will be
+ // split two times:
+ // (1) split between skipped loop (immediate exit) and entering the loop
+ // (2) split between exit after 1 iteration and entering the second iteration
+ // After these there is no third state split because the "don't assume third
+ // iteration" logic in `ExprEngine::processBranch` prevents it; but the
+ // `legacy-inlining-prevention` logic will put this function onto the list of
+ // functions that may not be inlined in the future.
+ // The exploration strategy apparently influences the number of times this
+ // function can be inlined before it's placed on the "don't inline" list.
for (int i = 2; i < getNum(); i++) {
- clang_analyzer_numTimesReached(); // expected-warning {{8}}
+ clang_analyzer_numTimesReached(); // default-warning {{4}} dfs-warning {{8}}
}
return 0;
}
int nested_inlined_unroll1() {
+ // Here the analyzer can unroll and fully execute both the outer loop and the
+ // inner loop within simple_known_bound_loop().
int k;
for (int i = 0; i < 9; i++) {
clang_analyzer_numTimesReached(); // expected-warning {{9}}
- k = simple_known_bound_loop(); // no reevaluation without inlining
+ k = simple_known_bound_loop();
}
int a = 22 / k; // expected-warning {{Division by zero}}
return 0;
}
int nested_inlined_no_unroll1() {
+ // Here no unrolling happens and we only run `analyzer-max-loop` (= 4)
+ // iterations of the loop within this function, but some state splits happen
+ // in `simple_unknown_bound_loop()` calls.
int k;
- for (int i = 0; i < 9; i++) {
- clang_analyzer_numTimesReached(); // expected-warning {{10}}
- k = simple_unknown_bound_loop(); // reevaluation without inlining, splits the state as well
+ for (int i = 0; i < 40; i++) {
+ clang_analyzer_numTimesReached(); // default-warning {{9}} dfs-warning {{12}}
+ k = simple_unknown_bound_loop();
}
int a = 22 / k; // no-warning
return 0;
diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c
index 1845d3b..916246e 100644
--- a/clang/test/CIR/CodeGen/basic.c
+++ b/clang/test/CIR/CodeGen/basic.c
@@ -253,3 +253,29 @@ size_type max_size(void) {
// OGCG: define{{.*}} i64 @max_size()
// OGCG: ret i64 2305843009213693951
+// CHECK: cir.store %5, %0 : !u64i, !cir.ptr<!u64i>
+// CHECK: %6 = cir.load %0 : !cir.ptr<!u64i>, !u64i
+// CHECK: cir.return %6 : !u64i
+// CHECK: }
+
+enum A {
+ A_one,
+ A_two
+};
+enum A a;
+
+// CHECK: cir.global external @a = #cir.int<0> : !u32i
+
+enum B : int;
+enum B b;
+
+// CHECK: cir.global external @b = #cir.int<0> : !u32i
+
+
+enum C : int {
+ C_one,
+ C_two
+};
+enum C c;
+
+// CHECK: cir.global external @c = #cir.int<0> : !u32i
diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp
index b5b3e36..43542c2 100644
--- a/clang/test/CIR/CodeGen/basic.cpp
+++ b/clang/test/CIR/CodeGen/basic.cpp
@@ -102,6 +102,10 @@ size_type max_size() {
// CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i
// CHECK: %4 = cir.const #cir.int<8> : !u64i
// CHECK: %5 = cir.binop(div, %3, %4) : !u64i
+// CHECK: cir.store %5, %0 : !u64i, !cir.ptr<!u64i>
+// CHECK: %6 = cir.load %0 : !cir.ptr<!u64i>, !u64i
+// CHECK: cir.return %6 : !u64i
+// CHECK: }
void ref_arg(int &x) {
int y = x;
@@ -141,3 +145,29 @@ void ref_local(short x) {
// CHECK: %[[Y_REF_ADDR:.*]] = cir.alloca !cir.ptr<!s16i>, !cir.ptr<!cir.ptr<!s16i>>, ["y", init, const] {alignment = 8 : i64}
// CHECK: cir.store %[[ARG]], %[[X_ADDR]] : !s16i, !cir.ptr<!s16i>
// CHECK: cir.store %[[X_ADDR]], %[[Y_REF_ADDR]] : !cir.ptr<!s16i>, !cir.ptr<!cir.ptr<!s16i>>
+
+enum A {
+ A_one,
+ A_two
+};
+enum A a;
+
+// CHECK: cir.global external @a = #cir.int<0> : !u32i
+
+enum B : int;
+enum B b;
+
+// CHECK: cir.global external @b = #cir.int<0> : !s32i
+
+enum C : int {
+ C_one,
+ C_two
+};
+enum C c;
+
+// CHECK: cir.global external @c = #cir.int<0> : !s32i
+
+enum class D : int;
+enum D d;
+
+// CHECK: cir.global external @d = #cir.int<0> : !s32i
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index 0756497..095bff1 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -213,3 +213,126 @@ void foo4() {
// OGCG: %[[TMP2:.*]] = load i32, ptr %[[IDX]], align 4
// OGCG: %[[ELE:.*]] = extractelement <4 x i32> %[[TMP1]], i32 %[[TMP2]]
// OGCG: store i32 %[[ELE]], ptr %[[INIT]], align 4
+
+void foo5() {
+ vi4 a = { 1, 2, 3, 4 };
+
+ a[2] = 5;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_VAL:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: %[[CONST_IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[TMP:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NEW_VEC:.*]] = cir.vec.insert %[[CONST_VAL]], %[[TMP]][%[[CONST_IDX]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NEW_VEC]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP]], i32 5, i32 2
+// LLVM: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP]], i32 5, i32 2
+// OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+void foo6() {
+ vi4 a = { 1, 2, 3, 4 };
+ int idx = 2;
+ int value = 5;
+ a[idx] = value;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[IDX:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["idx", init]
+// CIR: %[[VAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["value", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: cir.store %[[CONST_IDX]], %[[IDX]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[CONST_VAL:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[CONST_VAL]], %[[VAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP1:.*]] = cir.load %[[VAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[TMP2:.*]] = cir.load %[[IDX]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[TMP3:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NEW_VEC:.*]] = cir.vec.insert %[[TMP1]], %[[TMP3]][%[[TMP2]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NEW_VEC]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[IDX:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[VAL:.*]] = alloca i32, i64 1, align 4
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: store i32 2, ptr %[[IDX]], align 4
+// LLVM: store i32 5, ptr %[[VAL]], align 4
+// LLVM: %[[TMP1:.*]] = load i32, ptr %[[VAL]], align 4
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[IDX]], align 4
+// LLVM: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP3]], i32 %[[TMP1]], i32 %[[TMP2]]
+// LLVM: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[IDX:.*]] = alloca i32, align 4
+// OGCG: %[[VAL:.*]] = alloca i32, align 4
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: store i32 2, ptr %[[IDX]], align 4
+// OGCG: store i32 5, ptr %[[VAL]], align 4
+// OGCG: %[[TMP1:.*]] = load i32, ptr %[[VAL]], align 4
+// OGCG: %[[TMP2:.*]] = load i32, ptr %[[IDX]], align 4
+// OGCG: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP3]], i32 %[[TMP1]], i32 %[[TMP2]]
+// OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+void foo7() {
+ vi4 a = {1, 2, 3, 4};
+ a[2] += 5;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_VAL:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: %[[CONST_IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[TMP:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[ELE:.*]] = cir.vec.extract %[[TMP]][%[[CONST_IDX]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: %[[RES:.*]] = cir.binop(add, %[[ELE]], %[[CONST_VAL]]) nsw : !s32i
+// CIR: %[[TMP2:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NEW_VEC:.*]] = cir.vec.insert %[[RES]], %[[TMP2]][%[[CONST_IDX]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NEW_VEC]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[ELE:.*]] = extractelement <4 x i32> %[[TMP]], i32 2
+// LLVM: %[[RES:.*]] = add nsw i32 %[[ELE]], 5
+// LLVM: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP2]], i32 %[[RES]], i32 2
+// LLVM: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[ELE:.*]] = extractelement <4 x i32> %[[TMP]], i32 2
+// OGCG: %[[RES:.*]] = add nsw i32 %[[ELE]], 5
+// OGCG: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP2]], i32 %[[RES]], i32 2
+// OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index 5300181..aaf53b9 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -201,3 +201,126 @@ void foo4() {
// OGCG: %[[TMP2:.*]] = load i32, ptr %[[IDX]], align 4
// OGCG: %[[ELE:.*]] = extractelement <4 x i32> %[[TMP1]], i32 %[[TMP2]]
// OGCG: store i32 %[[ELE]], ptr %[[INIT]], align 4
+
+void foo5() {
+ vi4 a = { 1, 2, 3, 4 };
+
+ a[2] = 5;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_VAL:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: %[[CONST_IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[TMP:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NEW_VEC:.*]] = cir.vec.insert %[[CONST_VAL]], %[[TMP]][%[[CONST_IDX]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NEW_VEC]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP]], i32 5, i32 2
+// LLVM: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP]], i32 5, i32 2
+// OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+void foo6() {
+ vi4 a = { 1, 2, 3, 4 };
+ int idx = 2;
+ int value = 5;
+ a[idx] = value;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[IDX:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["idx", init]
+// CIR: %[[VAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["value", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: cir.store %[[CONST_IDX]], %[[IDX]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[CONST_VAL:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store %[[CONST_VAL]], %[[VAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP1:.*]] = cir.load %[[VAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[TMP2:.*]] = cir.load %[[IDX]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[TMP3:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NEW_VEC:.*]] = cir.vec.insert %[[TMP1]], %[[TMP3]][%[[TMP2]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NEW_VEC]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[IDX:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[VAL:.*]] = alloca i32, i64 1, align 4
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: store i32 2, ptr %[[IDX]], align 4
+// LLVM: store i32 5, ptr %[[VAL]], align 4
+// LLVM: %[[TMP1:.*]] = load i32, ptr %[[VAL]], align 4
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[IDX]], align 4
+// LLVM: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP3]], i32 %[[TMP1]], i32 %[[TMP2]]
+// LLVM: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[IDX:.*]] = alloca i32, align 4
+// OGCG: %[[VAL:.*]] = alloca i32, align 4
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: store i32 2, ptr %[[IDX]], align 4
+// OGCG: store i32 5, ptr %[[VAL]], align 4
+// OGCG: %[[TMP1:.*]] = load i32, ptr %[[VAL]], align 4
+// OGCG: %[[TMP2:.*]] = load i32, ptr %[[IDX]], align 4
+// OGCG: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP3]], i32 %[[TMP1]], i32 %[[TMP2]]
+// OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+void foo7() {
+ vi4 a = {1, 2, 3, 4};
+ a[2] += 5;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_VAL:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: %[[CONST_IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[TMP:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[ELE:.*]] = cir.vec.extract %[[TMP]][%[[CONST_IDX]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: %[[RES:.*]] = cir.binop(add, %[[ELE]], %[[CONST_VAL]]) nsw : !s32i
+// CIR: %[[TMP2:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NEW_VEC:.*]] = cir.vec.insert %[[RES]], %[[TMP2]][%[[CONST_IDX]] : !s32i] : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NEW_VEC]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[ELE:.*]] = extractelement <4 x i32> %[[TMP]], i32 2
+// LLVM: %[[RES:.*]] = add nsw i32 %[[ELE]], 5
+// LLVM: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP2]], i32 %[[RES]], i32 2
+// LLVM: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: %[[TMP:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[ELE:.*]] = extractelement <4 x i32> %[[TMP]], i32 2
+// OGCG: %[[RES:.*]] = add nsw i32 %[[ELE]], 5
+// OGCG: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP2]], i32 %[[RES]], i32 2
+// OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
diff --git a/clang/test/CIR/IR/vector.cir b/clang/test/CIR/IR/vector.cir
index aeb268e..21a1f0a 100644
--- a/clang/test/CIR/IR/vector.cir
+++ b/clang/test/CIR/IR/vector.cir
@@ -97,4 +97,42 @@ cir.func @vector_extract_element_test() {
// CHECK: cir.return
// CHECK: }
+cir.func @vector_insert_element_test() {
+ %0 = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+ %1 = cir.const #cir.int<1> : !s32i
+ %2 = cir.const #cir.int<2> : !s32i
+ %3 = cir.const #cir.int<3> : !s32i
+ %4 = cir.const #cir.int<4> : !s32i
+ %5 = cir.vec.create(%1, %2, %3, %4 : !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+ cir.store %5, %0 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+ %6 = cir.const #cir.int<5> : !s32i
+ %7 = cir.const #cir.int<2> : !s32i
+ %8 = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+ %9 = cir.vec.extract %8[%7 : !s32i] : !cir.vector<4 x !s32i>
+ %10 = cir.binop(add, %9, %6) nsw : !s32i
+ %11 = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+ %12 = cir.vec.insert %10, %11[%7 : !s32i] : !cir.vector<4 x !s32i>
+ cir.store %12, %0 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+ cir.return
+}
+
+// CHECK: cir.func @vector_insert_element_test() {
+// CHECK: %0 = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CHECK: %1 = cir.const #cir.int<1> : !s32i
+// CHECK: %2 = cir.const #cir.int<2> : !s32i
+// CHECK: %3 = cir.const #cir.int<3> : !s32i
+// CHECK: %4 = cir.const #cir.int<4> : !s32i
+// CHECK: %5 = cir.vec.create(%1, %2, %3, %4 : !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CHECK: cir.store %5, %0 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CHECK: %6 = cir.const #cir.int<5> : !s32i
+// CHECK: %7 = cir.const #cir.int<2> : !s32i
+// CHECK: %8 = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CHECK: %9 = cir.vec.extract %8[%7 : !s32i] : !cir.vector<4 x !s32i>
+// CHECK: %10 = cir.binop(add, %9, %6) nsw : !s32i
+// CHECK: %11 = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CHECK: %12 = cir.vec.insert %10, %11[%7 : !s32i] : !cir.vector<4 x !s32i>
+// CHECK: cir.store %12, %0 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CHECK: cir.return
+// CHECK: }
+
}
diff --git a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
index 1391a1b..36c3c7f 100644
--- a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
@@ -16,7 +16,7 @@ void func(int *restrict a, int *restrict b) {
// CHECK256-COUNT-8: str
// CHECK512-COUNT-4: str
// CHECK1024-COUNT-2: str
-// CHECK2048-COUNT-1: st1w
+// CHECK2048-COUNT-1: str
#pragma clang loop vectorize(enable)
for (int i = 0; i < 64; ++i)
a[i] += b[i];
diff --git a/clang/test/CodeGen/cfi-check-fail-debuginfo.c b/clang/test/CodeGen/cfi-check-fail-debuginfo.c
new file mode 100644
index 0000000..cd5ec56
--- /dev/null
+++ b/clang/test/CodeGen/cfi-check-fail-debuginfo.c
@@ -0,0 +1,45 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -O2 -fsanitize-cfi-cross-dso \
+// RUN: -fsanitize=cfi-icall,cfi-nvcall,cfi-vcall,cfi-unrelated-cast,cfi-derived-cast \
+// RUN: -fsanitize-trap=cfi-icall,cfi-nvcall -fsanitize-recover=cfi-vcall,cfi-unrelated-cast \
+// RUN: -fsanitize-annotate-debug-info=cfi-icall,cfi-nvcall,cfi-vcall,cfi-unrelated-cast,cfi-derived-cast \
+// RUN: -fdebug-prefix-map=%S/= -fno-ident -fdebug-compilation-dir=%S -debug-info-kind=limited \
+// RUN: -emit-llvm -o - %s | FileCheck %s
+
+// CHECK-LABEL: define dso_local void @caller(
+// CHECK-SAME: ptr noundef [[F:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !dbg [[DBG7:![0-9]+]] !type [[META16:![0-9]+]] !type [[META17:![0-9]+]] !type [[META18:![0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: #dbg_value(ptr [[F]], [[META15:![0-9]+]], !DIExpression(), [[META19:![0-9]+]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.type.test(ptr [[F]], metadata !"_ZTSFvvE"), !dbg [[DBG20:![0-9]+]], !nosanitize [[META21:![0-9]+]]
+// CHECK-NEXT: br i1 [[TMP0]], label %[[CFI_CONT:.*]], label %[[CFI_SLOWPATH:.*]], !dbg [[DBG20]], !prof [[PROF22:![0-9]+]], !nosanitize [[META21]]
+// CHECK: [[CFI_SLOWPATH]]:
+// CHECK-NEXT: tail call void @__cfi_slowpath(i64 9080559750644022485, ptr [[F]]) #[[ATTR6:[0-9]+]], !dbg [[DBG20]], !nosanitize [[META21]]
+// CHECK-NEXT: br label %[[CFI_CONT]], !dbg [[DBG20]], !nosanitize [[META21]]
+// CHECK: [[CFI_CONT]]:
+// CHECK-NEXT: tail call void [[F]]() #[[ATTR6]], !dbg [[DBG20]]
+// CHECK-NEXT: ret void, !dbg [[DBG23:![0-9]+]]
+//
+void caller(void (*f)(void)) {
+ f();
+}
+//.
+// CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+// CHECK: [[META1]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}})
+// CHECK: [[DBG7]] = distinct !DISubprogram(name: "caller", scope: [[META8:![0-9]+]], file: [[META8]], line: 22, type: [[META9:![0-9]+]], scopeLine: 22, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META14:![0-9]+]])
+// CHECK: [[META8]] = !DIFile(filename: "{{.*}}cfi-check-fail-debuginfo.c", directory: {{.*}})
+// CHECK: [[META9]] = !DISubroutineType(types: [[META10:![0-9]+]])
+// CHECK: [[META10]] = !{null, [[META11:![0-9]+]]}
+// CHECK: [[META11]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META12:![0-9]+]], size: 64)
+// CHECK: [[META12]] = !DISubroutineType(types: [[META13:![0-9]+]])
+// CHECK: [[META13]] = !{null}
+// CHECK: [[META14]] = !{[[META15]]}
+// CHECK: [[META15]] = !DILocalVariable(name: "f", arg: 1, scope: [[DBG7]], file: [[META8]], line: 22, type: [[META11]])
+// CHECK: [[META16]] = !{i64 0, !"_ZTSFvPFvvEE"}
+// CHECK: [[META17]] = !{i64 0, !"_ZTSFvPvE.generalized"}
+// CHECK: [[META18]] = !{i64 0, i64 2451761621477796417}
+// CHECK: [[META19]] = !DILocation(line: 0, scope: [[DBG7]])
+// CHECK: [[DBG20]] = !DILocation(line: 23, column: 3, scope: [[DBG7]])
+// CHECK: [[META21]] = !{}
+// CHECK: [[PROF22]] = !{!"branch_weights", i32 1048575, i32 1}
+// CHECK: [[DBG23]] = !DILocation(line: 24, column: 1, scope: [[DBG7]])
+//.
diff --git a/clang/test/CodeGen/cfi-icall-generalize-debuginfo.c b/clang/test/CodeGen/cfi-icall-generalize-debuginfo.c
new file mode 100644
index 0000000..8b18452
--- /dev/null
+++ b/clang/test/CodeGen/cfi-icall-generalize-debuginfo.c
@@ -0,0 +1,126 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -O2 -fsanitize=cfi-icall -fsanitize-trap=cfi-icall -emit-llvm -o - %s \
+// RUN: -fsanitize-annotate-debug-info=cfi-icall,cfi-nvcall,cfi-vcall,cfi-unrelated-cast,cfi-derived-cast \
+// RUN: -fdebug-prefix-map=%S/= -fno-ident -fdebug-compilation-dir=%S -debug-info-kind=limited \
+// RUN: | FileCheck --check-prefix=CHECK --check-prefix=UNGENERALIZED %s
+//
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -O2 -fsanitize=cfi-icall -fsanitize-trap=cfi-icall -fsanitize-cfi-icall-generalize-pointers -emit-llvm -o - %s \
+// RUN: -fsanitize-annotate-debug-info=cfi-icall,cfi-nvcall,cfi-vcall,cfi-unrelated-cast,cfi-derived-cast \
+// RUN: -fdebug-prefix-map=%S/= -fno-ident -fdebug-compilation-dir=%S -debug-info-kind=limited \
+// RUN: | FileCheck --check-prefix=CHECK --check-prefix=GENERALIZED %s
+
+// Test that const char* is generalized to const ptr and that const char** is
+// generalized to ptr
+
+// CHECK-LABEL: define dso_local noalias noundef ptr @f(
+// CHECK-SAME: ptr noundef readnone captures(none) [[A:%.*]], ptr noundef readnone captures(none) [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !dbg [[DBG10:![0-9]+]] !type [[META21:![0-9]+]] !type [[META22:![0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: #dbg_value(ptr [[A]], [[META19:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
+// CHECK-NEXT: #dbg_value(ptr [[B]], [[META20:![0-9]+]], !DIExpression(), [[META23]])
+// CHECK-NEXT: ret ptr null, !dbg [[DBG24:![0-9]+]]
+//
+int** f(const char *a, const char **b) {
+ return (int**)0;
+}
+
+// UNGENERALIZED-LABEL: define dso_local void @g(
+// UNGENERALIZED-SAME: ptr noundef [[FP:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] !dbg [[DBG25:![0-9]+]] !type [[META31:![0-9]+]] !type [[META32:![0-9]+]] {
+// UNGENERALIZED-NEXT: [[ENTRY:.*:]]
+// UNGENERALIZED-NEXT: #dbg_value(ptr [[FP]], [[META30:![0-9]+]], !DIExpression(), [[META33:![0-9]+]])
+// UNGENERALIZED-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.type.test(ptr [[FP]], metadata !"_ZTSFPPiPKcPS2_E"), !dbg [[DBG34:![0-9]+]], !nosanitize [[META35:![0-9]+]]
+// UNGENERALIZED-NEXT: br i1 [[TMP0]], label %[[CONT:.*]], label %[[TRAP:.*]], !dbg [[DBG34]], !prof [[PROF36:![0-9]+]], !nosanitize [[META35]]
+// UNGENERALIZED: [[TRAP]]:
+// UNGENERALIZED-NEXT: tail call void @llvm.ubsantrap(i8 2) #[[ATTR4:[0-9]+]], !dbg [[DBG34]], !nosanitize [[META35]]
+// UNGENERALIZED-NEXT: unreachable, !dbg [[DBG34]], !nosanitize [[META35]]
+// UNGENERALIZED: [[CONT]]:
+// UNGENERALIZED-NEXT: [[CALL:%.*]] = tail call ptr [[FP]](ptr noundef null, ptr noundef null) #[[ATTR5:[0-9]+]], !dbg [[DBG34]]
+// UNGENERALIZED-NEXT: ret void, !dbg [[DBG37:![0-9]+]]
+//
+// GENERALIZED-LABEL: define dso_local void @g(
+// GENERALIZED-SAME: ptr noundef [[FP:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] !dbg [[DBG25:![0-9]+]] !type [[META31:![0-9]+]] !type [[META32:![0-9]+]] {
+// GENERALIZED-NEXT: [[ENTRY:.*:]]
+// GENERALIZED-NEXT: #dbg_value(ptr [[FP]], [[META30:![0-9]+]], !DIExpression(), [[META33:![0-9]+]])
+// GENERALIZED-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.type.test(ptr [[FP]], metadata !"_ZTSFPvPKvS_E.generalized"), !dbg [[DBG34:![0-9]+]], !nosanitize [[META35:![0-9]+]]
+// GENERALIZED-NEXT: br i1 [[TMP0]], label %[[CONT:.*]], label %[[TRAP:.*]], !dbg [[DBG34]], !prof [[PROF36:![0-9]+]], !nosanitize [[META35]]
+// GENERALIZED: [[TRAP]]:
+// GENERALIZED-NEXT: tail call void @llvm.ubsantrap(i8 2) #[[ATTR4:[0-9]+]], !dbg [[DBG34]], !nosanitize [[META35]]
+// GENERALIZED-NEXT: unreachable, !dbg [[DBG34]], !nosanitize [[META35]]
+// GENERALIZED: [[CONT]]:
+// GENERALIZED-NEXT: [[CALL:%.*]] = tail call ptr [[FP]](ptr noundef null, ptr noundef null) #[[ATTR5:[0-9]+]], !dbg [[DBG34]]
+// GENERALIZED-NEXT: ret void, !dbg [[DBG37:![0-9]+]]
+//
+void g(int** (*fp)(const char *, const char **)) {
+ fp(0, 0);
+}
+
+//.
+// UNGENERALIZED: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, retainedTypes: [[META2:![0-9]+]], splitDebugInlining: false, nameTableKind: None)
+// UNGENERALIZED: [[META1]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}})
+// UNGENERALIZED: [[META2]] = !{[[META3:![0-9]+]]}
+// UNGENERALIZED: [[META3]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META4:![0-9]+]], size: 64)
+// UNGENERALIZED: [[META4]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META5:![0-9]+]], size: 64)
+// UNGENERALIZED: [[META5]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+// UNGENERALIZED: [[DBG10]] = distinct !DISubprogram(name: "f", scope: [[META11:![0-9]+]], file: [[META11]], line: 22, type: [[META12:![0-9]+]], scopeLine: 22, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META18:![0-9]+]])
+// UNGENERALIZED: [[META11]] = !DIFile(filename: "{{.*}}cfi-icall-generalize-debuginfo.c", directory: {{.*}})
+// UNGENERALIZED: [[META12]] = !DISubroutineType(types: [[META13:![0-9]+]])
+// UNGENERALIZED: [[META13]] = !{[[META3]], [[META14:![0-9]+]], [[META17:![0-9]+]]}
+// UNGENERALIZED: [[META14]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META15:![0-9]+]], size: 64)
+// UNGENERALIZED: [[META15]] = !DIDerivedType(tag: DW_TAG_const_type, baseType: [[META16:![0-9]+]])
+// UNGENERALIZED: [[META16]] = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+// UNGENERALIZED: [[META17]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META14]], size: 64)
+// UNGENERALIZED: [[META18]] = !{[[META19]], [[META20]]}
+// UNGENERALIZED: [[META19]] = !DILocalVariable(name: "a", arg: 1, scope: [[DBG10]], file: [[META11]], line: 22, type: [[META14]])
+// UNGENERALIZED: [[META20]] = !DILocalVariable(name: "b", arg: 2, scope: [[DBG10]], file: [[META11]], line: 22, type: [[META17]])
+// UNGENERALIZED: [[META21]] = !{i64 0, !"_ZTSFPPiPKcPS2_E"}
+// UNGENERALIZED: [[META22]] = !{i64 0, !"_ZTSFPvPKvS_E.generalized"}
+// UNGENERALIZED: [[META23]] = !DILocation(line: 0, scope: [[DBG10]])
+// UNGENERALIZED: [[DBG24]] = !DILocation(line: 23, column: 3, scope: [[DBG10]])
+// UNGENERALIZED: [[DBG25]] = distinct !DISubprogram(name: "g", scope: [[META11]], file: [[META11]], line: 52, type: [[META26:![0-9]+]], scopeLine: 52, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META29:![0-9]+]])
+// UNGENERALIZED: [[META26]] = !DISubroutineType(types: [[META27:![0-9]+]])
+// UNGENERALIZED: [[META27]] = !{null, [[META28:![0-9]+]]}
+// UNGENERALIZED: [[META28]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META12]], size: 64)
+// UNGENERALIZED: [[META29]] = !{[[META30]]}
+// UNGENERALIZED: [[META30]] = !DILocalVariable(name: "fp", arg: 1, scope: [[DBG25]], file: [[META11]], line: 52, type: [[META28]])
+// UNGENERALIZED: [[META31]] = !{i64 0, !"_ZTSFvPFPPiPKcPS2_EE"}
+// UNGENERALIZED: [[META32]] = !{i64 0, !"_ZTSFvPvE.generalized"}
+// UNGENERALIZED: [[META33]] = !DILocation(line: 0, scope: [[DBG25]])
+// UNGENERALIZED: [[DBG34]] = !DILocation(line: 53, column: 3, scope: [[DBG25]])
+// UNGENERALIZED: [[META35]] = !{}
+// UNGENERALIZED: [[PROF36]] = !{!"branch_weights", i32 1048575, i32 1}
+// UNGENERALIZED: [[DBG37]] = !DILocation(line: 54, column: 1, scope: [[DBG25]])
+//.
+// GENERALIZED: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, retainedTypes: [[META2:![0-9]+]], splitDebugInlining: false, nameTableKind: None)
+// GENERALIZED: [[META1]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}})
+// GENERALIZED: [[META2]] = !{[[META3:![0-9]+]]}
+// GENERALIZED: [[META3]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META4:![0-9]+]], size: 64)
+// GENERALIZED: [[META4]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META5:![0-9]+]], size: 64)
+// GENERALIZED: [[META5]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+// GENERALIZED: [[DBG10]] = distinct !DISubprogram(name: "f", scope: [[META11:![0-9]+]], file: [[META11]], line: 22, type: [[META12:![0-9]+]], scopeLine: 22, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META18:![0-9]+]])
+// GENERALIZED: [[META11]] = !DIFile(filename: "{{.*}}cfi-icall-generalize-debuginfo.c", directory: {{.*}})
+// GENERALIZED: [[META12]] = !DISubroutineType(types: [[META13:![0-9]+]])
+// GENERALIZED: [[META13]] = !{[[META3]], [[META14:![0-9]+]], [[META17:![0-9]+]]}
+// GENERALIZED: [[META14]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META15:![0-9]+]], size: 64)
+// GENERALIZED: [[META15]] = !DIDerivedType(tag: DW_TAG_const_type, baseType: [[META16:![0-9]+]])
+// GENERALIZED: [[META16]] = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+// GENERALIZED: [[META17]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META14]], size: 64)
+// GENERALIZED: [[META18]] = !{[[META19]], [[META20]]}
+// GENERALIZED: [[META19]] = !DILocalVariable(name: "a", arg: 1, scope: [[DBG10]], file: [[META11]], line: 22, type: [[META14]])
+// GENERALIZED: [[META20]] = !DILocalVariable(name: "b", arg: 2, scope: [[DBG10]], file: [[META11]], line: 22, type: [[META17]])
+// GENERALIZED: [[META21]] = !{i64 0, !"_ZTSFPPiPKcPS2_E"}
+// GENERALIZED: [[META22]] = !{i64 0, !"_ZTSFPvPKvS_E.generalized"}
+// GENERALIZED: [[META23]] = !DILocation(line: 0, scope: [[DBG10]])
+// GENERALIZED: [[DBG24]] = !DILocation(line: 23, column: 3, scope: [[DBG10]])
+// GENERALIZED: [[DBG25]] = distinct !DISubprogram(name: "g", scope: [[META11]], file: [[META11]], line: 52, type: [[META26:![0-9]+]], scopeLine: 52, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META29:![0-9]+]])
+// GENERALIZED: [[META26]] = !DISubroutineType(types: [[META27:![0-9]+]])
+// GENERALIZED: [[META27]] = !{null, [[META28:![0-9]+]]}
+// GENERALIZED: [[META28]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META12]], size: 64)
+// GENERALIZED: [[META29]] = !{[[META30]]}
+// GENERALIZED: [[META30]] = !DILocalVariable(name: "fp", arg: 1, scope: [[DBG25]], file: [[META11]], line: 52, type: [[META28]])
+// GENERALIZED: [[META31]] = !{i64 0, !"_ZTSFvPFPPiPKcPS2_EE"}
+// GENERALIZED: [[META32]] = !{i64 0, !"_ZTSFvPvE.generalized"}
+// GENERALIZED: [[META33]] = !DILocation(line: 0, scope: [[DBG25]])
+// GENERALIZED: [[DBG34]] = !DILocation(line: 53, column: 3, scope: [[DBG25]])
+// GENERALIZED: [[META35]] = !{}
+// GENERALIZED: [[PROF36]] = !{!"branch_weights", i32 1048575, i32 1}
+// GENERALIZED: [[DBG37]] = !DILocation(line: 54, column: 1, scope: [[DBG25]])
+//.
diff --git a/clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c b/clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c
new file mode 100644
index 0000000..00f416e
--- /dev/null
+++ b/clang/test/CodeGen/cfi-icall-normalize2-debuginfo.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -O2 -fsanitize=cfi-icall -fsanitize-trap=cfi-icall -fsanitize-cfi-icall-experimental-normalize-integers -emit-llvm -o - %s \
+// RUN: -fsanitize-annotate-debug-info=cfi-icall,cfi-nvcall,cfi-vcall,cfi-unrelated-cast,cfi-derived-cast \
+// RUN: -fdebug-prefix-map=%S/= -fno-ident -fdebug-compilation-dir=%S -debug-info-kind=limited \
+// RUN: | FileCheck %s
+
+// Test that normalized type metadata for functions are emitted for cross-language CFI support with
+// other languages that can't represent and encode C/C++ integer types.
+
+// CHECK-LABEL: define dso_local void @foo(
+// CHECK-SAME: ptr noundef [[FN:%.*]], i32 noundef [[ARG:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !dbg [[DBG7:![0-9]+]] !type [[META18:![0-9]+]] !type [[META19:![0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: #dbg_value(ptr [[FN]], [[META16:![0-9]+]], !DIExpression(), [[META20:![0-9]+]])
+// CHECK-NEXT: #dbg_value(i32 [[ARG]], [[META17:![0-9]+]], !DIExpression(), [[META20]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.type.test(ptr [[FN]], metadata !"_ZTSFvu3i32E.normalized"), !dbg [[DBG21:![0-9]+]], !nosanitize [[META22:![0-9]+]]
+// CHECK-NEXT: br i1 [[TMP0]], label %[[CONT:.*]], label %[[TRAP:.*]], !dbg [[DBG21]], !prof [[PROF23:![0-9]+]], !nosanitize [[META22]]
+// CHECK: [[TRAP]]:
+// CHECK-NEXT: tail call void @llvm.ubsantrap(i8 2) #[[ATTR3:[0-9]+]], !dbg [[DBG21]], !nosanitize [[META22]]
+// CHECK-NEXT: unreachable, !dbg [[DBG21]], !nosanitize [[META22]]
+// CHECK: [[CONT]]:
+// CHECK-NEXT: tail call void [[FN]](i32 noundef [[ARG]]) #[[ATTR4:[0-9]+]], !dbg [[DBG21]]
+// CHECK-NEXT: ret void, !dbg [[DBG24:![0-9]+]]
+//
+void foo(void (*fn)(int), int arg) {
+ fn(arg);
+}
+
+// CHECK-LABEL: define dso_local void @bar(
+// CHECK-SAME: ptr noundef [[FN:%.*]], i32 noundef [[ARG1:%.*]], i32 noundef [[ARG2:%.*]]) local_unnamed_addr #[[ATTR0]] !dbg [[DBG25:![0-9]+]] !type [[META35:![0-9]+]] !type [[META36:![0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: #dbg_value(ptr [[FN]], [[META32:![0-9]+]], !DIExpression(), [[META37:![0-9]+]])
+// CHECK-NEXT: #dbg_value(i32 [[ARG1]], [[META33:![0-9]+]], !DIExpression(), [[META37]])
+// CHECK-NEXT: #dbg_value(i32 [[ARG2]], [[META34:![0-9]+]], !DIExpression(), [[META37]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.type.test(ptr [[FN]], metadata !"_ZTSFvu3i32S_E.normalized"), !dbg [[DBG38:![0-9]+]], !nosanitize [[META22]]
+// CHECK-NEXT: br i1 [[TMP0]], label %[[CONT:.*]], label %[[TRAP:.*]], !dbg [[DBG38]], !prof [[PROF23]], !nosanitize [[META22]]
+// CHECK: [[TRAP]]:
+// CHECK-NEXT: tail call void @llvm.ubsantrap(i8 2) #[[ATTR3]], !dbg [[DBG38]], !nosanitize [[META22]]
+// CHECK-NEXT: unreachable, !dbg [[DBG38]], !nosanitize [[META22]]
+// CHECK: [[CONT]]:
+// CHECK-NEXT: tail call void [[FN]](i32 noundef [[ARG1]], i32 noundef [[ARG2]]) #[[ATTR4]], !dbg [[DBG38]]
+// CHECK-NEXT: ret void, !dbg [[DBG39:![0-9]+]]
+//
+void bar(void (*fn)(int, int), int arg1, int arg2) {
+ fn(arg1, arg2);
+}
+
+// CHECK-LABEL: define dso_local void @baz(
+// CHECK-SAME: ptr noundef [[FN:%.*]], i32 noundef [[ARG1:%.*]], i32 noundef [[ARG2:%.*]], i32 noundef [[ARG3:%.*]]) local_unnamed_addr #[[ATTR0]] !dbg [[DBG40:![0-9]+]] !type [[META51:![0-9]+]] !type [[META52:![0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: #dbg_value(ptr [[FN]], [[META47:![0-9]+]], !DIExpression(), [[META53:![0-9]+]])
+// CHECK-NEXT: #dbg_value(i32 [[ARG1]], [[META48:![0-9]+]], !DIExpression(), [[META53]])
+// CHECK-NEXT: #dbg_value(i32 [[ARG2]], [[META49:![0-9]+]], !DIExpression(), [[META53]])
+// CHECK-NEXT: #dbg_value(i32 [[ARG3]], [[META50:![0-9]+]], !DIExpression(), [[META53]])
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.type.test(ptr [[FN]], metadata !"_ZTSFvu3i32S_S_E.normalized"), !dbg [[DBG54:![0-9]+]], !nosanitize [[META22]]
+// CHECK-NEXT: br i1 [[TMP0]], label %[[CONT:.*]], label %[[TRAP:.*]], !dbg [[DBG54]], !prof [[PROF23]], !nosanitize [[META22]]
+// CHECK: [[TRAP]]:
+// CHECK-NEXT: tail call void @llvm.ubsantrap(i8 2) #[[ATTR3]], !dbg [[DBG54]], !nosanitize [[META22]]
+// CHECK-NEXT: unreachable, !dbg [[DBG54]], !nosanitize [[META22]]
+// CHECK: [[CONT]]:
+// CHECK-NEXT: tail call void [[FN]](i32 noundef [[ARG1]], i32 noundef [[ARG2]], i32 noundef [[ARG3]]) #[[ATTR4]], !dbg [[DBG54]]
+// CHECK-NEXT: ret void, !dbg [[DBG55:![0-9]+]]
+//
+void baz(void (*fn)(int, int, int), int arg1, int arg2, int arg3) {
+ fn(arg1, arg2, arg3);
+}
+
+//.
+// CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+// CHECK: [[META1]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}})
+// CHECK: [[DBG7]] = distinct !DISubprogram(name: "foo", scope: [[META8:![0-9]+]], file: [[META8]], line: 24, type: [[META9:![0-9]+]], scopeLine: 24, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META15:![0-9]+]])
+// CHECK: [[META8]] = !DIFile(filename: "{{.*}}cfi-icall-normalize2-debuginfo.c", directory: {{.*}})
+// CHECK: [[META9]] = !DISubroutineType(types: [[META10:![0-9]+]])
+// CHECK: [[META10]] = !{null, [[META11:![0-9]+]], [[META14:![0-9]+]]}
+// CHECK: [[META11]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META12:![0-9]+]], size: 64)
+// CHECK: [[META12]] = !DISubroutineType(types: [[META13:![0-9]+]])
+// CHECK: [[META13]] = !{null, [[META14]]}
+// CHECK: [[META14]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+// CHECK: [[META15]] = !{[[META16]], [[META17]]}
+// CHECK: [[META16]] = !DILocalVariable(name: "fn", arg: 1, scope: [[DBG7]], file: [[META8]], line: 24, type: [[META11]])
+// CHECK: [[META17]] = !DILocalVariable(name: "arg", arg: 2, scope: [[DBG7]], file: [[META8]], line: 24, type: [[META14]])
+// CHECK: [[META18]] = !{i64 0, !"_ZTSFvPFvu3i32ES_E.normalized"}
+// CHECK: [[META19]] = !{i64 0, !"_ZTSFvPvu3i32E.normalized.generalized"}
+// CHECK: [[META20]] = !DILocation(line: 0, scope: [[DBG7]])
+// CHECK: [[DBG21]] = !DILocation(line: 25, column: 5, scope: [[DBG7]])
+// CHECK: [[META22]] = !{}
+// CHECK: [[PROF23]] = !{!"branch_weights", i32 1048575, i32 1}
+// CHECK: [[DBG24]] = !DILocation(line: 26, column: 1, scope: [[DBG7]])
+// CHECK: [[DBG25]] = distinct !DISubprogram(name: "bar", scope: [[META8]], file: [[META8]], line: 43, type: [[META26:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META31:![0-9]+]])
+// CHECK: [[META26]] = !DISubroutineType(types: [[META27:![0-9]+]])
+// CHECK: [[META27]] = !{null, [[META28:![0-9]+]], [[META14]], [[META14]]}
+// CHECK: [[META28]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META29:![0-9]+]], size: 64)
+// CHECK: [[META29]] = !DISubroutineType(types: [[META30:![0-9]+]])
+// CHECK: [[META30]] = !{null, [[META14]], [[META14]]}
+// CHECK: [[META31]] = !{[[META32]], [[META33]], [[META34]]}
+// CHECK: [[META32]] = !DILocalVariable(name: "fn", arg: 1, scope: [[DBG25]], file: [[META8]], line: 43, type: [[META28]])
+// CHECK: [[META33]] = !DILocalVariable(name: "arg1", arg: 2, scope: [[DBG25]], file: [[META8]], line: 43, type: [[META14]])
+// CHECK: [[META34]] = !DILocalVariable(name: "arg2", arg: 3, scope: [[DBG25]], file: [[META8]], line: 43, type: [[META14]])
+// CHECK: [[META35]] = !{i64 0, !"_ZTSFvPFvu3i32S_ES_S_E.normalized"}
+// CHECK: [[META36]] = !{i64 0, !"_ZTSFvPvu3i32S0_E.normalized.generalized"}
+// CHECK: [[META37]] = !DILocation(line: 0, scope: [[DBG25]])
+// CHECK: [[DBG38]] = !DILocation(line: 44, column: 5, scope: [[DBG25]])
+// CHECK: [[DBG39]] = !DILocation(line: 45, column: 1, scope: [[DBG25]])
+// CHECK: [[DBG40]] = distinct !DISubprogram(name: "baz", scope: [[META8]], file: [[META8]], line: 63, type: [[META41:![0-9]+]], scopeLine: 63, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META46:![0-9]+]])
+// CHECK: [[META41]] = !DISubroutineType(types: [[META42:![0-9]+]])
+// CHECK: [[META42]] = !{null, [[META43:![0-9]+]], [[META14]], [[META14]], [[META14]]}
+// CHECK: [[META43]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: [[META44:![0-9]+]], size: 64)
+// CHECK: [[META44]] = !DISubroutineType(types: [[META45:![0-9]+]])
+// CHECK: [[META45]] = !{null, [[META14]], [[META14]], [[META14]]}
+// CHECK: [[META46]] = !{[[META47]], [[META48]], [[META49]], [[META50]]}
+// CHECK: [[META47]] = !DILocalVariable(name: "fn", arg: 1, scope: [[DBG40]], file: [[META8]], line: 63, type: [[META43]])
+// CHECK: [[META48]] = !DILocalVariable(name: "arg1", arg: 2, scope: [[DBG40]], file: [[META8]], line: 63, type: [[META14]])
+// CHECK: [[META49]] = !DILocalVariable(name: "arg2", arg: 3, scope: [[DBG40]], file: [[META8]], line: 63, type: [[META14]])
+// CHECK: [[META50]] = !DILocalVariable(name: "arg3", arg: 4, scope: [[DBG40]], file: [[META8]], line: 63, type: [[META14]])
+// CHECK: [[META51]] = !{i64 0, !"_ZTSFvPFvu3i32S_S_ES_S_S_E.normalized"}
+// CHECK: [[META52]] = !{i64 0, !"_ZTSFvPvu3i32S0_S0_E.normalized.generalized"}
+// CHECK: [[META53]] = !DILocation(line: 0, scope: [[DBG40]])
+// CHECK: [[DBG54]] = !DILocation(line: 64, column: 5, scope: [[DBG40]])
+// CHECK: [[DBG55]] = !DILocation(line: 65, column: 1, scope: [[DBG40]])
+//.
diff --git a/clang/test/Driver/no-integrated-cpp.c b/clang/test/Driver/no-integrated-cpp.c
new file mode 100644
index 0000000..a7dc847
--- /dev/null
+++ b/clang/test/Driver/no-integrated-cpp.c
@@ -0,0 +1,83 @@
+// RUN: %clang -O2 %s -E -o %t.i
+//
+// RUN: %clang -O2 %s -c -o a.o -no-integrated-cpp -### 2>&1 | FileCheck %s --check-prefixes=SRC
+// SRC: "-E"
+// SRC-SAME: "-o" "[[PREPROC:.*.i]]"
+// SRC-SAME: "-x" "c" "{{.*}}no-integrated-cpp.c"
+//
+// SRC-NEXT: "-emit-obj"
+// SRC-SAME: "-o" "a.o"
+// SRC-SAME: "-x" "cpp-output" "[[PREPROC]]"
+//
+// RUN: %clang -O2 %s -c -o a.o -no-integrated-cpp -save-temps -### 2>&1 | FileCheck %s --check-prefixes=SRC-SAVE
+// SRC-SAVE: "-E"
+// SRC-SAVE-SAME: "-o" "[[PREPROC:.*.i]]"
+// SRC-SAVE-SAME: "-x" "c" "{{.*}}no-integrated-cpp.c"
+//
+// SRC-SAVE-NEXT: "-emit-llvm-bc"
+// SRC-SAVE-SAME: "-o" "[[BITCODE:.*.bc]]"
+// SRC-SAVE-SAME: "-x" "cpp-output" "[[PREPROC]]"
+//
+// SRC-SAVE-NEXT: "-S"
+// SRC-SAVE-SAME: "-o" "[[ASM:.*.s]]"
+// SRC-SAVE-SAME: "-x" "ir" "[[BITCODE]]"
+//
+// SRC-SAVE-NEXT: "-cc1as"
+// SRC-SAVE-SAME: "-o" "a.o" "[[ASM]]"
+//
+// RUN: %clang -O2 %t.i -c -o a.o -no-integrated-cpp -### 2>&1 | FileCheck %s --check-prefixes=PRE
+// PRE-NOT: "-E"
+// PRE: "-emit-obj"
+// PRE-SAME: "-o" "a.o"
+// PRE-SAME: "-x" "cpp-output" "{{.*}}no-integrated-cpp.c.tmp.i"
+//
+// RUN: %clang -O2 %t.i -c -o a.o -no-integrated-cpp -save-temps -### 2>&1 | FileCheck %s --check-prefixes=PRE-SAVE
+// PRE-SAVE-NOT: "-E"
+// PRE-SAVE: "-emit-llvm-bc"
+// PRE-SAVE-SAME: "-o" "[[BITCODE:.*.bc]]"
+// PRE-SAVE-SAME: "-x" "cpp-output" "{{.*}}no-integrated-cpp.c.tmp.i"
+//
+// PRE-SAVE-NEXT: "-S"
+// PRE-SAVE-SAME: "-o" "[[ASM:.*.s]]"
+// PRE-SAVE-SAME: "-x" "ir" "[[BITCODE]]"
+//
+// PRE-SAVE-NEXT: "-cc1as"
+// PRE-SAVE-SAME: "-o" "a.o" "[[ASM]]"
+//
+// RUN: %clang -O2 %s -c -emit-llvm -o a.bc -no-integrated-cpp -### 2>&1 | FileCheck %s --check-prefixes=LLVM
+// LLVM: "-E"
+// LLVM-SAME: "-o" "[[PREPROC:.*.i]]"
+// LLVM-SAME: "-x" "c" "{{.*}}no-integrated-cpp.c"
+//
+// LLVM-NEXT: "-emit-llvm-bc"
+// LLVM-SAME: "-o" "a.bc"
+// LLVM-SAME: "-x" "cpp-output" "[[PREPROC]]"
+//
+// RUN: %clang -O2 %s -c -emit-llvm -o a.bc -no-integrated-cpp -save-temps -### 2>&1 | FileCheck %s --check-prefixes=LLVM-SAVE
+// LLVM-SAVE: "-E"
+// LLVM-SAVE-SAME: "-o" "[[PREPROC:.*.i]]"
+// LLVM-SAVE-SAME: "-x" "c" "{{.*}}no-integrated-cpp.c"
+//
+// LLVM-SAVE-NEXT: "-emit-llvm-bc"
+// LLVM-SAVE-SAME: "-o" "[[BITCODE:.*.bc]]"
+// LLVM-SAVE-SAME: "-x" "cpp-output" "[[PREPROC]]"
+//
+// LLVM-SAVE-NEXT: "-emit-llvm-bc"
+// LLVM-SAVE-SAME: "-o" "a.bc"
+// LLVM-SAVE-SAME: "-x" "ir" "[[BITCODE]]"
+//
+// RUN: %clang -O2 %t.i -c -emit-llvm -o a.bc -no-integrated-cpp -### 2>&1 | FileCheck %s --check-prefixes=PRE-LLVM
+// PRE-LLVM-NOT: "-E"
+// PRE-LLVM: "-emit-llvm-bc"
+// PRE-LLVM-SAME: "-o" "a.bc"
+// PRE-LLVM-SAME: "-x" "cpp-output" "{{.*}}no-integrated-cpp.c.tmp.i"
+//
+// RUN: %clang -O2 %t.i -c -emit-llvm -o a.bc -no-integrated-cpp -save-temps -### 2>&1 | FileCheck %s --check-prefixes=PRE-LLVM-SAVE
+// PRE-LLVM-SAVE-NOT: "-E"
+// PRE-LLVM-SAVE: "-emit-llvm-bc"
+// PRE-LLVM-SAVE-SAME: "-o" "[[BITCODE:.*.bc]]"
+// PRE-LLVM-SAVE-SAME: "-x" "cpp-output" "{{.*}}no-integrated-cpp.c.tmp.i"
+
+// PRE-LLVM-SAVE-NEXT: "-emit-llvm-bc"
+// PRE-LLVM-SAVE-SAME: "-o" "a.bc"
+// PRE-LLVM-SAVE-SAME: "-x" "ir" "[[BITCODE]]"
diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index b10850a..f1c5d45 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -157,6 +157,7 @@
// CHECK-NEXT: svpbmt 1.0 'Svpbmt' (Page-Based Memory Types)
// CHECK-NEXT: svvptc 1.0 'Svvptc' (Obviating Memory-Management Instructions after Marking PTEs Valid)
// CHECK-NEXT: xandesperf 5.0 'XAndesPerf' (Andes Performance Extension)
+// CHECK-NEXT: xandesvpackfph 5.0 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension)
// CHECK-NEXT: xcvalu 1.0 'XCValu' (CORE-V ALU Operations)
// CHECK-NEXT: xcvbi 1.0 'XCVbi' (CORE-V Immediate Branching)
// CHECK-NEXT: xcvbitmanip 1.0 'XCVbitmanip' (CORE-V Bit Manipulation)
diff --git a/clang/test/Driver/rewrite-objc-preproc.m b/clang/test/Driver/rewrite-objc-preproc.m
new file mode 100644
index 0000000..f32d09c
--- /dev/null
+++ b/clang/test/Driver/rewrite-objc-preproc.m
@@ -0,0 +1,5 @@
+// RUN: %clang --target=x86_64-apple-macosx10.7.0 -rewrite-objc %s -o - -### 2>&1 | \
+// RUN: FileCheck %s
+//
+// Check that we're running a preprocessing stage passing a not-preprocessed objective-c++ file as input
+// CHECK: "-E"{{.*}}"-x" "objective-c++"
diff --git a/clang/test/Modules/no-external-type-id.cppm b/clang/test/Modules/no-external-type-id.cppm
index d067e57..2c05769 100644
--- a/clang/test/Modules/no-external-type-id.cppm
+++ b/clang/test/Modules/no-external-type-id.cppm
@@ -23,7 +23,7 @@ export module b;
import a;
export int b();
-// CHECK: <DECL_FUNCTION {{.*}} op8=4120
+// CHECK: <DECL_FUNCTION {{.*}} op8=[[#]]
// CHECK: <TYPE_FUNCTION_PROTO
//--- a.v1.cppm
diff --git a/clang/test/OpenMP/begin_declare_variant_executable_scope.c b/clang/test/OpenMP/begin_declare_variant_executable_scope.c
new file mode 100644
index 0000000..d3d74eb
--- /dev/null
+++ b/clang/test/OpenMP/begin_declare_variant_executable_scope.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
+// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
+
+// expected-no-diagnostics
+
+#pragma omp begin declare variant match(implementation={vendor(ibm)})
+void f(int);
+#pragma omp end declare variant
+
+#pragma omp begin declare variant match(implementation={vendor(llvm)})
+void f(void);
+#pragma omp end declare variant
+
+int main() {
+#pragma omp begin declare variant match(implementation={vendor(ibm)})
+ int i = 0;
+ f(i);
+#pragma omp end declare variant
+
+#pragma omp begin declare variant match(implementation={vendor(llvm)})
+ f();
+#pragma omp end declare variant
+}
diff --git a/clang/test/OpenMP/begin_declare_variant_messages.c b/clang/test/OpenMP/begin_declare_variant_messages.c
index f87714a..d8d8f42 100644
--- a/clang/test/OpenMP/begin_declare_variant_messages.c
+++ b/clang/test/OpenMP/begin_declare_variant_messages.c
@@ -16,8 +16,7 @@
#pragma omp variant begin // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant end // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp begin declare variant // omp50-error {{expected 'match' clause on 'omp declare variant' directive}} omp51-error {{expected 'match', 'adjust_args', or 'append_args' clause on 'omp declare variant' directive}}
-#pragma omp end declare variant
-// TODO: Issue an error message
+#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
@@ -27,11 +26,11 @@ int foo(void);
const int var;
#pragma omp begin declare variant // omp50-error {{expected 'match' clause on 'omp declare variant' directive}} omp51-error {{expected 'match', 'adjust_args', or 'append_args' clause on 'omp declare variant' directive}}
-#pragma omp end declare variant
+#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp begin declare variant xxx // omp50-error {{expected 'match' clause on 'omp declare variant' directive}} omp51-error {{expected 'match', 'adjust_args', or 'append_args' clause on 'omp declare variant' directive}}
-#pragma omp end declare variant
+#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp begin declare variant match // expected-error {{expected '(' after 'match'}}
-#pragma omp end declare variant
+#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp begin declare variant match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'target_device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'target_device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
diff --git a/clang/test/OpenMP/cancel_messages.cpp b/clang/test/OpenMP/cancel_messages.cpp
index 0c96bee..1391578 100644
--- a/clang/test/OpenMP/cancel_messages.cpp
+++ b/clang/test/OpenMP/cancel_messages.cpp
@@ -93,3 +93,8 @@ label1 : {
return 0;
}
+namespace GH139360 {
+void f(){
+#pragma omp cancel( // expected-error {{one of 'for', 'parallel', 'sections' or 'taskgroup' is expected}}
+}
+} // namesapce GH139360
diff --git a/clang/test/OpenMP/for_collapse_messages.cpp b/clang/test/OpenMP/for_collapse_messages.cpp
index 07630ff..147536e 100644
--- a/clang/test/OpenMP/for_collapse_messages.cpp
+++ b/clang/test/OpenMP/for_collapse_messages.cpp
@@ -49,6 +49,8 @@ T tmain(T argc, S **argv) {
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#pragma omp for collapse (S) // expected-error {{'S' does not refer to a value}}
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
+ #pragma omp for collapse (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'collapse' clause requires a value that can be represented by a 64-bit}}
+ for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#if __cplusplus <= 199711L
// expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}}
#else
diff --git a/clang/test/OpenMP/for_ordered_clause.cpp b/clang/test/OpenMP/for_ordered_clause.cpp
index d9dbb82..4fdcefe 100644
--- a/clang/test/OpenMP/for_ordered_clause.cpp
+++ b/clang/test/OpenMP/for_ordered_clause.cpp
@@ -53,6 +53,9 @@ T tmain(T argc, S **argv) {
#pragma omp for ordered(S) // expected-error {{'S' does not refer to a value}}
for (int i = ST; i < N; i++)
argv[0][i] = argv[0][i] - argv[0][i - ST];
+#pragma omp for ordered (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'ordered' clause requires a value that can be represented by a 64-bit}}
+ for (int i = ST; i < N; i++)
+ argv[0][i] = argv[0][i] - argv[0][i-ST];
#if __cplusplus <= 199711L
// expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}}
#else
diff --git a/clang/test/OpenMP/for_simd_collapse_messages.cpp b/clang/test/OpenMP/for_simd_collapse_messages.cpp
index d9f8a2d..d108f93 100644
--- a/clang/test/OpenMP/for_simd_collapse_messages.cpp
+++ b/clang/test/OpenMP/for_simd_collapse_messages.cpp
@@ -43,6 +43,8 @@ T tmain(T argc, S **argv) {
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#pragma omp for simd collapse (S) // expected-error {{'S' does not refer to a value}}
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
+ #pragma omp for simd collapse (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'collapse' clause requires a value that can be represented by a 64-bit}}
+ for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#if __cplusplus <= 199711L
// expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}}
#else
diff --git a/clang/test/OpenMP/for_simd_loop_messages.cpp b/clang/test/OpenMP/for_simd_loop_messages.cpp
index 74a52f3..f58f0f3 100644
--- a/clang/test/OpenMP/for_simd_loop_messages.cpp
+++ b/clang/test/OpenMP/for_simd_loop_messages.cpp
@@ -735,6 +735,9 @@ void test_ordered() {
#pragma omp for simd ordered(1)
for (int i = 0; i < 16; ++i)
;
+#pragma omp for simd ordered (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'ordered' clause requires a value that can be represented by a 64-bit}}
+ for (int i = 0; i < 10; i++)
+ ;
}
void test_nowait() {
diff --git a/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp b/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp
index 6c15d4f..067f443 100644
--- a/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp
+++ b/clang/test/OpenMP/masked_taskloop_collapse_messages.cpp
@@ -43,6 +43,8 @@ T tmain(T argc, S **argv) {
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#pragma omp masked taskloop collapse (S) // expected-error {{'S' does not refer to a value}}
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
+ #pragma omp masked taskloop collapse (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'collapse' clause requires a value that can be represented by a 64-bit}}
+ for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#if __cplusplus <= 199711L
// expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}}
#else
diff --git a/clang/test/OpenMP/masked_taskloop_simd_collapse_messages.cpp b/clang/test/OpenMP/masked_taskloop_simd_collapse_messages.cpp
index 0ecf9d9..a906853 100644
--- a/clang/test/OpenMP/masked_taskloop_simd_collapse_messages.cpp
+++ b/clang/test/OpenMP/masked_taskloop_simd_collapse_messages.cpp
@@ -43,6 +43,8 @@ T tmain(T argc, S **argv) {
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#pragma omp masked taskloop simd collapse (S) // expected-error {{'S' does not refer to a value}}
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
+ #pragma omp masked taskloop simd collapse (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'collapse' clause requires a value that can be represented by a 64-bit}}
+ for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#if __cplusplus <= 199711L
// expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}}
#else
diff --git a/clang/test/OpenMP/simd_collapse_messages.cpp b/clang/test/OpenMP/simd_collapse_messages.cpp
index 1ce3bef..bd0040c 100644
--- a/clang/test/OpenMP/simd_collapse_messages.cpp
+++ b/clang/test/OpenMP/simd_collapse_messages.cpp
@@ -43,6 +43,8 @@ T tmain(T argc, S **argv) {
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#pragma omp simd collapse (S) // expected-error {{'S' does not refer to a value}}
for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
+ #pragma omp simd collapse (0xFFFFFFFFFFFFFFFF) // expected-error {{argument to 'collapse' clause requires a value that can be represented by a 64-bit}}
+ for (int i = ST; i < N; i++) argv[0][i] = argv[0][i] - argv[0][i-ST];
#if __cplusplus <= 199711L
// expected-error@+4 2 {{integral constant expression}} expected-note@+4 0+{{constant expression}}
#else
@@ -97,3 +99,12 @@ int main(int argc, char **argv) {
return tmain<int, char, 1, 0>(argc, argv);
}
+namespace GH138493 {
+void f(void) {
+ // This would previously crash when processing an invalid expression as an
+ // argument to collapse.
+#pragma omp simd collapse(a) // expected-error {{use of undeclared identifier 'a'}}
+ for (int i = 0; i < 10; i++)
+ ;
+}
+} // namespace GH138493
diff --git a/clang/test/ParserOpenACC/parse-clauses.c b/clang/test/ParserOpenACC/parse-clauses.c
index 52d5828..6d771e8 100644
--- a/clang/test/ParserOpenACC/parse-clauses.c
+++ b/clang/test/ParserOpenACC/parse-clauses.c
@@ -1345,9 +1345,15 @@ void bar();
#pragma acc routine seq bind
void BCP1();
- // expected-error@+1{{expected identifier or string literal}}
+ // expected-error@+1{{expected identifier or string literal in OpenACC 'bind' clause}}
#pragma acc routine(BCP1) seq bind()
+ // expected-error@+1{{expected identifier or string literal in OpenACC 'bind' clause}}
+#pragma acc routine(BCP1) seq bind(1)
+
+ // expected-error@+1{{expected identifier or string literal in OpenACC 'bind' clause}}
+#pragma acc routine(BCP1) gang bind(0xF)
+
// expected-error@+1{{expected function or lambda declaration for 'routine' construct}}
#pragma acc routine seq bind("ReductionClauseParsing")
diff --git a/clang/test/SemaHLSL/RootSignature-err.hlsl b/clang/test/SemaHLSL/RootSignature-err.hlsl
new file mode 100644
index 0000000..f544247
--- /dev/null
+++ b/clang/test/SemaHLSL/RootSignature-err.hlsl
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -o - %s -verify
+
+// Attr test
+
+[RootSignature()] // expected-error {{expected string literal as argument of 'RootSignature' attribute}}
+void bad_root_signature_0() {}
+
+// expected-error@+2 {{expected ')'}}
+// expected-note@+1 {{to match this '('}}
+[RootSignature("", "")]
+void bad_root_signature_1() {}
+
+[RootSignature(""), RootSignature("DescriptorTable()")] // expected-error {{attribute 'RootSignature' cannot appear more than once on a declaration}}
+void bad_root_signature_2() {}
+
+[RootSignature(""), RootSignature("")] // expected-warning {{attribute 'RootSignature' is already applied}}
+void bad_root_signature_3() {}
+
+[RootSignature("DescriptorTable(), invalid")] // expected-error {{expected end of stream to denote end of parameters, or, another valid parameter of RootSignature}}
+void bad_root_signature_4() {}
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 42c24cb..9163c87 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -7229,6 +7229,7 @@ CXCursor clang_getCursorDefinition(CXCursor C) {
case Decl::MSProperty:
case Decl::MSGuid:
case Decl::HLSLBuffer:
+ case Decl::HLSLRootSignature:
case Decl::UnnamedGlobalConstant:
case Decl::TemplateParamObject:
case Decl::IndirectField:
diff --git a/clang/unittests/Tooling/DependencyScanning/DependencyScanningFilesystemTest.cpp b/clang/unittests/Tooling/DependencyScanning/DependencyScanningFilesystemTest.cpp
index aed7937..7420743 100644
--- a/clang/unittests/Tooling/DependencyScanning/DependencyScanningFilesystemTest.cpp
+++ b/clang/unittests/Tooling/DependencyScanning/DependencyScanningFilesystemTest.cpp
@@ -197,7 +197,7 @@ TEST(DependencyScanningFilesystem, DiagnoseStaleStatFailures) {
EXPECT_EQ(Path1Exists, false);
std::vector<llvm::StringRef> InvalidPaths =
- SharedCache.getInvalidNegativeStatCachedPaths(*InMemoryFS.get());
+ SharedCache.getInvalidNegativeStatCachedPaths(*InMemoryFS);
EXPECT_EQ(InvalidPaths.size(), 1u);
ASSERT_STREQ("/path1.suffix", InvalidPaths[0].str().c_str());
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index e722888..e347b89 100644
--- a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -1089,7 +1089,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
if (End) {
Parsed.push_back(New<TextPiece>(Text.slice(0, End), "diagtext"));
- Text = Text.slice(End, StringRef::npos);
+ Text = Text.substr(End);
if (Text.empty())
break;
}
@@ -1103,7 +1103,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
// Extract the (optional) modifier.
size_t ModLength = Text.find_first_of("0123456789<{");
StringRef Modifier = Text.slice(0, ModLength);
- Text = Text.slice(ModLength, StringRef::npos);
+ Text = Text.substr(ModLength);
ModifierType ModType = StringSwitch<ModifierType>{Modifier}
.Case("select", MT_Select)
.Case("enum_select", MT_EnumSelect)
@@ -1154,7 +1154,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
Text = Text.drop_front(); // Drop '<'
size_t EnumNameLen = Text.find_first_of('>');
EnumSelect->EnumName = Text.slice(0, EnumNameLen);
- Text = Text.slice(EnumNameLen, StringRef::npos);
+ Text = Text.substr(EnumNameLen);
ExpectAndConsume(">");
if (Text[0] != '{')
@@ -1169,7 +1169,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
Text = Text.drop_front(); // '%'
size_t OptionNameLen = Text.find_first_of("{");
EnumSelect->OptionEnumNames.push_back(Text.slice(0, OptionNameLen));
- Text = Text.slice(OptionNameLen, StringRef::npos);
+ Text = Text.substr(OptionNameLen);
} else {
EnumSelect->OptionEnumNames.push_back({});
}
@@ -1206,7 +1206,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
assert(!Text.empty());
Plural->OptionPrefixes.push_back(
New<TextPiece>(Text.slice(0, End), "diagtext"));
- Text = Text.slice(End, StringRef::npos);
+ Text = Text.substr(End);
Plural->Options.push_back(
parseDiagText(Text, StopAt::PipeOrCloseBrace));
assert(!Text.empty() && "malformed %plural");
diff --git a/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
index b6c1aad..b651820 100644
--- a/clang/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -205,10 +205,7 @@ std::string escapeRST(StringRef Str) {
}
StringRef getSphinxOptionID(StringRef OptionName) {
- for (auto I = OptionName.begin(), E = OptionName.end(); I != E; ++I)
- if (!isalnum(*I) && *I != '-')
- return OptionName.substr(0, I - OptionName.begin());
- return OptionName;
+ return OptionName.take_while([](char C) { return isalnum(C) || C == '-'; });
}
bool canSphinxCopeWithOption(const Record *Option) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
index 1414092..fded665 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -14,6 +14,18 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
+// Helper to suppress warnings related to 8-byte atomic accesses when the target
+// is 32-bit AIX (where such accesses use libatomic).
+#if defined(_AIX) && !defined(__powerpc64__) && defined(__clang__)
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Watomic-alignment\"")
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END _Pragma("clang diagnostic pop")
+#else
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END
+#endif
+
namespace __sanitizer {
// We use the compiler builtin atomic operations for loads and stores, which
@@ -35,6 +47,7 @@ inline void proc_yield(int cnt) {
#endif
}
+SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN
template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
@@ -92,6 +105,8 @@ inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
+SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END
+
} // namespace __sanitizer
#undef ATOMIC_ORDER
diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h
index 5cdabb3..922af41 100644
--- a/flang/include/flang/Evaluate/tools.h
+++ b/flang/include/flang/Evaluate/tools.h
@@ -399,20 +399,17 @@ template <typename T>
bool IsArrayElement(const Expr<T> &expr, bool intoSubstring = true,
bool skipComponents = false) {
if (auto dataRef{ExtractDataRef(expr, intoSubstring)}) {
- const DataRef *ref{&*dataRef};
- if (skipComponents) {
- while (const Component * component{std::get_if<Component>(&ref->u)}) {
- ref = &component->base();
+ for (const DataRef *ref{&*dataRef}; ref;) {
+ if (const Component * component{std::get_if<Component>(&ref->u)}) {
+ ref = skipComponents ? &component->base() : nullptr;
+ } else if (const auto *coarrayRef{std::get_if<CoarrayRef>(&ref->u)}) {
+ ref = &coarrayRef->base();
+ } else {
+ return std::holds_alternative<ArrayRef>(ref->u);
}
}
- if (const auto *coarrayRef{std::get_if<CoarrayRef>(&ref->u)}) {
- return !coarrayRef->subscript().empty();
- } else {
- return std::holds_alternative<ArrayRef>(ref->u);
- }
- } else {
- return false;
}
+ return false;
}
template <typename A>
@@ -426,9 +423,6 @@ std::optional<NamedEntity> ExtractNamedEntity(const A &x) {
[](Component &&component) -> std::optional<NamedEntity> {
return NamedEntity{std::move(component)};
},
- [](CoarrayRef &&co) -> std::optional<NamedEntity> {
- return co.GetBase();
- },
[](auto &&) { return std::optional<NamedEntity>{}; },
},
std::move(dataRef->u));
@@ -536,22 +530,14 @@ const Symbol *UnwrapWholeSymbolOrComponentDataRef(const A &x) {
// If an expression is a whole symbol or a whole component designator,
// potentially followed by an image selector, extract and return that symbol,
// else null.
+const Symbol *UnwrapWholeSymbolOrComponentOrCoarrayRef(const DataRef &);
template <typename A>
const Symbol *UnwrapWholeSymbolOrComponentOrCoarrayRef(const A &x) {
if (auto dataRef{ExtractDataRef(x)}) {
- if (const SymbolRef * p{std::get_if<SymbolRef>(&dataRef->u)}) {
- return &p->get();
- } else if (const Component * c{std::get_if<Component>(&dataRef->u)}) {
- if (c->base().Rank() == 0) {
- return &c->GetLastSymbol();
- }
- } else if (const CoarrayRef * c{std::get_if<CoarrayRef>(&dataRef->u)}) {
- if (c->subscript().empty()) {
- return &c->GetLastSymbol();
- }
- }
+ return UnwrapWholeSymbolOrComponentOrCoarrayRef(*dataRef);
+ } else {
+ return nullptr;
}
- return nullptr;
}
// GetFirstSymbol(A%B%C[I]%D) -> A
diff --git a/flang/include/flang/Evaluate/traverse.h b/flang/include/flang/Evaluate/traverse.h
index 45402143..48aafa8 100644
--- a/flang/include/flang/Evaluate/traverse.h
+++ b/flang/include/flang/Evaluate/traverse.h
@@ -146,8 +146,7 @@ public:
return Combine(x.base(), x.subscript());
}
Result operator()(const CoarrayRef &x) const {
- return Combine(
- x.base(), x.subscript(), x.cosubscript(), x.stat(), x.team());
+ return Combine(x.base(), x.cosubscript(), x.stat(), x.team());
}
Result operator()(const DataRef &x) const { return visitor_(x.u); }
Result operator()(const Substring &x) const {
diff --git a/flang/include/flang/Evaluate/variable.h b/flang/include/flang/Evaluate/variable.h
index 7f1518f..5c14421 100644
--- a/flang/include/flang/Evaluate/variable.h
+++ b/flang/include/flang/Evaluate/variable.h
@@ -98,8 +98,6 @@ private:
// A NamedEntity is either a whole Symbol or a component in an instance
// of a derived type. It may be a descriptor.
-// TODO: this is basically a symbol with an optional DataRef base;
-// could be used to replace Component.
class NamedEntity {
public:
CLASS_BOILERPLATE(NamedEntity)
@@ -239,28 +237,16 @@ private:
std::vector<Subscript> subscript_;
};
-// R914 coindexed-named-object
-// R924 image-selector, R926 image-selector-spec.
-// C825 severely limits the usage of derived types with coarray ultimate
-// components: they can't be pointers, allocatables, arrays, coarrays, or
-// function results. They can be components of other derived types.
-// Although the F'2018 Standard never prohibits multiple image-selectors
-// per se in the same data-ref or designator, nor the presence of an
-// image-selector after a part-ref with rank, the constraints on the
-// derived types that would have be involved make it impossible to declare
-// an object that could be referenced in these ways (esp. C748 & C825).
-// C930 precludes having both TEAM= and TEAM_NUMBER=.
-// TODO C931 prohibits the use of a coindexed object as a stat-variable.
+// A coindexed data-ref. The base is represented as a general
+// DataRef, but the base may not contain a CoarrayRef and may
+// have rank > 0 only in an uppermost ArrayRef.
class CoarrayRef {
public:
CLASS_BOILERPLATE(CoarrayRef)
- CoarrayRef(SymbolVector &&, std::vector<Subscript> &&,
- std::vector<Expr<SubscriptInteger>> &&);
+ CoarrayRef(DataRef &&, std::vector<Expr<SubscriptInteger>> &&);
- const SymbolVector &base() const { return base_; }
- SymbolVector &base() { return base_; }
- const std::vector<Subscript> &subscript() const { return subscript_; }
- std::vector<Subscript> &subscript() { return subscript_; }
+ const DataRef &base() const { return base_.value(); }
+ DataRef &base() { return base_.value(); }
const std::vector<Expr<SubscriptInteger>> &cosubscript() const {
return cosubscript_;
}
@@ -270,25 +256,24 @@ public:
// (i.e., Designator or pointer-valued FunctionRef).
std::optional<Expr<SomeInteger>> stat() const;
CoarrayRef &set_stat(Expr<SomeInteger> &&);
- std::optional<Expr<SomeInteger>> team() const;
- bool teamIsTeamNumber() const { return teamIsTeamNumber_; }
- CoarrayRef &set_team(Expr<SomeInteger> &&, bool isTeamNumber = false);
+ // When team() is Expr<SomeInteger>, it's TEAM_NUMBER=; otherwise,
+ // it's TEAM=.
+ std::optional<Expr<SomeType>> team() const;
+ CoarrayRef &set_team(Expr<SomeType> &&);
int Rank() const;
int Corank() const { return 0; }
const Symbol &GetFirstSymbol() const;
const Symbol &GetLastSymbol() const;
- NamedEntity GetBase() const;
std::optional<Expr<SubscriptInteger>> LEN() const;
bool operator==(const CoarrayRef &) const;
llvm::raw_ostream &AsFortran(llvm::raw_ostream &) const;
private:
- SymbolVector base_;
- std::vector<Subscript> subscript_;
+ common::CopyableIndirection<DataRef> base_;
std::vector<Expr<SubscriptInteger>> cosubscript_;
- std::optional<common::CopyableIndirection<Expr<SomeInteger>>> stat_, team_;
- bool teamIsTeamNumber_{false}; // false: TEAM=, true: TEAM_NUMBER=
+ std::optional<common::CopyableIndirection<Expr<SomeInteger>>> stat_;
+ std::optional<common::CopyableIndirection<Expr<SomeType>>> team_;
};
// R911 data-ref is defined syntactically as a series of part-refs, which
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index a0d7a79..254236b 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -4149,8 +4149,8 @@ struct OmpDefaultClause {
// PRESENT // since 5.1
struct OmpDefaultmapClause {
TUPLE_CLASS_BOILERPLATE(OmpDefaultmapClause);
- ENUM_CLASS(
- ImplicitBehavior, Alloc, To, From, Tofrom, Firstprivate, None, Default)
+ ENUM_CLASS(ImplicitBehavior, Alloc, To, From, Tofrom, Firstprivate, None,
+ Default, Present)
MODIFIER_BOILERPLATE(OmpVariableCategory);
std::tuple<ImplicitBehavior, MODIFIERS()> t;
};
diff --git a/flang/include/flang/Parser/token-sequence.h b/flang/include/flang/Parser/token-sequence.h
index 69291e6..05aeacc 100644
--- a/flang/include/flang/Parser/token-sequence.h
+++ b/flang/include/flang/Parser/token-sequence.h
@@ -137,7 +137,7 @@ public:
TokenSequence &RemoveRedundantBlanks(std::size_t firstChar = 0);
TokenSequence &ClipComment(const Prescanner &, bool skipFirst = false);
const TokenSequence &CheckBadFortranCharacters(
- Messages &, const Prescanner &, bool allowAmpersand) const;
+ Messages &, const Prescanner &, bool preprocessingOnly) const;
bool BadlyNestedParentheses() const;
const TokenSequence &CheckBadParentheses(Messages &) const;
void Emit(CookedSource &) const;
diff --git a/flang/include/flang/Support/Fortran-features.h b/flang/include/flang/Support/Fortran-features.h
index 6cb1bcd..550a5c8 100644
--- a/flang/include/flang/Support/Fortran-features.h
+++ b/flang/include/flang/Support/Fortran-features.h
@@ -54,7 +54,7 @@ ENUM_CLASS(LanguageFeature, BackslashEscapes, OldDebugLines,
PolymorphicActualAllocatableOrPointerToMonomorphicDummy, RelaxedPureDummy,
UndefinableAsynchronousOrVolatileActual, AutomaticInMainProgram, PrintCptr,
SavedLocalInSpecExpr, PrintNamelist, AssumedRankPassedToNonAssumedRank,
- IgnoreIrrelevantAttributes, Unsigned)
+ IgnoreIrrelevantAttributes, Unsigned, ContiguousOkForSeqAssociation)
// Portability and suspicious usage warnings
ENUM_CLASS(UsageWarning, Portability, PointerToUndefinable,
diff --git a/flang/lib/Evaluate/check-expression.cpp b/flang/lib/Evaluate/check-expression.cpp
index d8baaf2..3d7f01d 100644
--- a/flang/lib/Evaluate/check-expression.cpp
+++ b/flang/lib/Evaluate/check-expression.cpp
@@ -946,10 +946,7 @@ public:
return std::nullopt;
}
}
- Result operator()(const CoarrayRef &x) const {
- int rank{0};
- return CheckSubscripts(x.subscript(), rank).has_value();
- }
+ Result operator()(const CoarrayRef &x) const { return (*this)(x.base()); }
Result operator()(const Component &x) const {
if (x.base().Rank() == 0) {
return (*this)(x.GetLastSymbol());
diff --git a/flang/lib/Evaluate/fold.cpp b/flang/lib/Evaluate/fold.cpp
index 5fc3172..45e842a 100644
--- a/flang/lib/Evaluate/fold.cpp
+++ b/flang/lib/Evaluate/fold.cpp
@@ -162,22 +162,17 @@ ArrayRef FoldOperation(FoldingContext &context, ArrayRef &&arrayRef) {
}
CoarrayRef FoldOperation(FoldingContext &context, CoarrayRef &&coarrayRef) {
- std::vector<Subscript> subscript;
- for (Subscript x : coarrayRef.subscript()) {
- subscript.emplace_back(FoldOperation(context, std::move(x)));
- }
+ DataRef base{FoldOperation(context, std::move(coarrayRef.base()))};
std::vector<Expr<SubscriptInteger>> cosubscript;
for (Expr<SubscriptInteger> x : coarrayRef.cosubscript()) {
cosubscript.emplace_back(Fold(context, std::move(x)));
}
- CoarrayRef folded{std::move(coarrayRef.base()), std::move(subscript),
- std::move(cosubscript)};
+ CoarrayRef folded{std::move(base), std::move(cosubscript)};
if (std::optional<Expr<SomeInteger>> stat{coarrayRef.stat()}) {
folded.set_stat(Fold(context, std::move(*stat)));
}
- if (std::optional<Expr<SomeInteger>> team{coarrayRef.team()}) {
- folded.set_team(
- Fold(context, std::move(*team)), coarrayRef.teamIsTeamNumber());
+ if (std::optional<Expr<SomeType>> team{coarrayRef.team()}) {
+ folded.set_team(Fold(context, std::move(*team)));
}
return folded;
}
diff --git a/flang/lib/Evaluate/formatting.cpp b/flang/lib/Evaluate/formatting.cpp
index 6778fac..121afc6 100644
--- a/flang/lib/Evaluate/formatting.cpp
+++ b/flang/lib/Evaluate/formatting.cpp
@@ -723,24 +723,8 @@ llvm::raw_ostream &ArrayRef::AsFortran(llvm::raw_ostream &o) const {
}
llvm::raw_ostream &CoarrayRef::AsFortran(llvm::raw_ostream &o) const {
- bool first{true};
- for (const Symbol &part : base_) {
- if (first) {
- first = false;
- } else {
- o << '%';
- }
- EmitVar(o, part);
- }
- char separator{'('};
- for (const auto &sscript : subscript_) {
- EmitVar(o << separator, sscript);
- separator = ',';
- }
- if (separator == ',') {
- o << ')';
- }
- separator = '[';
+ base().AsFortran(o);
+ char separator{'['};
for (const auto &css : cosubscript_) {
EmitVar(o << separator, css);
separator = ',';
@@ -750,8 +734,10 @@ llvm::raw_ostream &CoarrayRef::AsFortran(llvm::raw_ostream &o) const {
separator = ',';
}
if (team_) {
- EmitVar(
- o << separator, team_, teamIsTeamNumber_ ? "TEAM_NUMBER=" : "TEAM=");
+ EmitVar(o << separator, team_,
+ std::holds_alternative<Expr<SomeInteger>>(team_->value().u)
+ ? "TEAM_NUMBER="
+ : "TEAM=");
}
return o << ']';
}
diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp
index d64a008..e802915 100644
--- a/flang/lib/Evaluate/intrinsics.cpp
+++ b/flang/lib/Evaluate/intrinsics.cpp
@@ -2340,7 +2340,7 @@ std::optional<SpecificCall> IntrinsicInterface::Match(
if (!knownArg) {
knownArg = arg;
}
- if (!dimArg && rank > 0 &&
+ if (rank > 0 &&
(std::strcmp(name, "shape") == 0 ||
std::strcmp(name, "size") == 0 ||
std::strcmp(name, "ubound") == 0)) {
@@ -2351,16 +2351,18 @@ std::optional<SpecificCall> IntrinsicInterface::Match(
// over this one, as this error is caught by the second entry
// for UBOUND.)
if (auto named{ExtractNamedEntity(*arg)}) {
- if (semantics::IsAssumedSizeArray(named->GetLastSymbol())) {
+ if (semantics::IsAssumedSizeArray(ResolveAssociations(
+ named->GetLastSymbol().GetUltimate()))) {
if (strcmp(name, "shape") == 0) {
messages.Say(arg->sourceLocation(),
"The 'source=' argument to the intrinsic function 'shape' may not be assumed-size"_err_en_US);
- } else {
+ return std::nullopt;
+ } else if (!dimArg) {
messages.Say(arg->sourceLocation(),
"A dim= argument is required for '%s' when the array is assumed-size"_err_en_US,
name);
+ return std::nullopt;
}
- return std::nullopt;
}
}
}
diff --git a/flang/lib/Evaluate/shape.cpp b/flang/lib/Evaluate/shape.cpp
index f620ecd4..ac4811e 100644
--- a/flang/lib/Evaluate/shape.cpp
+++ b/flang/lib/Evaluate/shape.cpp
@@ -891,20 +891,7 @@ auto GetShapeHelper::operator()(const ArrayRef &arrayRef) const -> Result {
}
auto GetShapeHelper::operator()(const CoarrayRef &coarrayRef) const -> Result {
- NamedEntity base{coarrayRef.GetBase()};
- if (coarrayRef.subscript().empty()) {
- return (*this)(base);
- } else {
- Shape shape;
- int dimension{0};
- for (const Subscript &ss : coarrayRef.subscript()) {
- if (ss.Rank() > 0) {
- shape.emplace_back(GetExtent(ss, base, dimension));
- }
- ++dimension;
- }
- return shape;
- }
+ return (*this)(coarrayRef.base());
}
auto GetShapeHelper::operator()(const Substring &substring) const -> Result {
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index 702711e3..7ce009c 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -1090,7 +1090,7 @@ auto GetSymbolVectorHelper::operator()(const ArrayRef &x) const -> Result {
return GetSymbolVector(x.base());
}
auto GetSymbolVectorHelper::operator()(const CoarrayRef &x) const -> Result {
- return x.base();
+ return GetSymbolVector(x.base());
}
const Symbol *GetLastTarget(const SymbolVector &symbols) {
@@ -1196,16 +1196,6 @@ parser::Message *AttachDeclaration(
const auto *assoc{unhosted->detailsIf<semantics::HostAssocDetails>()}) {
unhosted = &assoc->symbol();
}
- if (const auto *binding{
- unhosted->detailsIf<semantics::ProcBindingDetails>()}) {
- if (binding->symbol().name() != symbol.name()) {
- message.Attach(binding->symbol().name(),
- "Procedure '%s' of type '%s' is bound to '%s'"_en_US, symbol.name(),
- symbol.owner().GetName().value(), binding->symbol().name());
- return &message;
- }
- unhosted = &binding->symbol();
- }
if (const auto *use{symbol.detailsIf<semantics::UseDetails>()}) {
message.Attach(use->location(),
"'%s' is USE-associated with '%s' in module '%s'"_en_US, symbol.name(),
@@ -1214,6 +1204,14 @@ parser::Message *AttachDeclaration(
message.Attach(
unhosted->name(), "Declaration of '%s'"_en_US, unhosted->name());
}
+ if (const auto *binding{
+ unhosted->detailsIf<semantics::ProcBindingDetails>()}) {
+ if (binding->symbol().name() != symbol.name()) {
+ message.Attach(binding->symbol().name(),
+ "Procedure '%s' of type '%s' is bound to '%s'"_en_US, symbol.name(),
+ symbol.owner().GetName().value(), binding->symbol().name());
+ }
+ }
return &message;
}
@@ -1320,6 +1318,19 @@ std::optional<parser::MessageFixedText> CheckProcCompatibility(bool isCall,
return msg;
}
+const Symbol *UnwrapWholeSymbolOrComponentOrCoarrayRef(const DataRef &dataRef) {
+ if (const SymbolRef * p{std::get_if<SymbolRef>(&dataRef.u)}) {
+ return &p->get();
+ } else if (const Component * c{std::get_if<Component>(&dataRef.u)}) {
+ if (c->base().Rank() == 0) {
+ return &c->GetLastSymbol();
+ }
+ } else if (const CoarrayRef * c{std::get_if<CoarrayRef>(&dataRef.u)}) {
+ return UnwrapWholeSymbolOrComponentOrCoarrayRef(c->base());
+ }
+ return nullptr;
+}
+
// GetLastPointerSymbol()
static const Symbol *GetLastPointerSymbol(const Symbol &symbol) {
return IsPointer(GetAssociationRoot(symbol)) ? &symbol : nullptr;
diff --git a/flang/lib/Evaluate/variable.cpp b/flang/lib/Evaluate/variable.cpp
index 849194b..d1bff03 100644
--- a/flang/lib/Evaluate/variable.cpp
+++ b/flang/lib/Evaluate/variable.cpp
@@ -69,13 +69,9 @@ Triplet &Triplet::set_stride(Expr<SubscriptInteger> &&expr) {
return *this;
}
-CoarrayRef::CoarrayRef(SymbolVector &&base, std::vector<Subscript> &&ss,
- std::vector<Expr<SubscriptInteger>> &&css)
- : base_{std::move(base)}, subscript_(std::move(ss)),
- cosubscript_(std::move(css)) {
- CHECK(!base_.empty());
- CHECK(!cosubscript_.empty());
-}
+CoarrayRef::CoarrayRef(
+ DataRef &&base, std::vector<Expr<SubscriptInteger>> &&css)
+ : base_{std::move(base)}, cosubscript_(std::move(css)) {}
std::optional<Expr<SomeInteger>> CoarrayRef::stat() const {
if (stat_) {
@@ -85,7 +81,7 @@ std::optional<Expr<SomeInteger>> CoarrayRef::stat() const {
}
}
-std::optional<Expr<SomeInteger>> CoarrayRef::team() const {
+std::optional<Expr<SomeType>> CoarrayRef::team() const {
if (team_) {
return team_.value().value();
} else {
@@ -99,16 +95,18 @@ CoarrayRef &CoarrayRef::set_stat(Expr<SomeInteger> &&v) {
return *this;
}
-CoarrayRef &CoarrayRef::set_team(Expr<SomeInteger> &&v, bool isTeamNumber) {
- CHECK(IsVariable(v));
+CoarrayRef &CoarrayRef::set_team(Expr<SomeType> &&v) {
team_.emplace(std::move(v));
- teamIsTeamNumber_ = isTeamNumber;
return *this;
}
-const Symbol &CoarrayRef::GetFirstSymbol() const { return base_.front(); }
+const Symbol &CoarrayRef::GetFirstSymbol() const {
+ return base().GetFirstSymbol();
+}
-const Symbol &CoarrayRef::GetLastSymbol() const { return base_.back(); }
+const Symbol &CoarrayRef::GetLastSymbol() const {
+ return base().GetLastSymbol();
+}
void Substring::SetBounds(std::optional<Expr<SubscriptInteger>> &lower,
std::optional<Expr<SubscriptInteger>> &upper) {
@@ -426,17 +424,7 @@ int ArrayRef::Rank() const {
}
}
-int CoarrayRef::Rank() const {
- if (!subscript_.empty()) {
- int rank{0};
- for (const auto &expr : subscript_) {
- rank += expr.Rank();
- }
- return rank;
- } else {
- return base_.back()->Rank();
- }
-}
+int CoarrayRef::Rank() const { return base().Rank(); }
int DataRef::Rank() const {
return common::visit(common::visitors{
@@ -671,22 +659,6 @@ std::optional<DynamicType> Designator<T>::GetType() const {
return std::nullopt;
}
-static NamedEntity AsNamedEntity(const SymbolVector &x) {
- CHECK(!x.empty());
- NamedEntity result{x.front()};
- int j{0};
- for (const Symbol &symbol : x) {
- if (j++ != 0) {
- DataRef base{result.IsSymbol() ? DataRef{result.GetLastSymbol()}
- : DataRef{result.GetComponent()}};
- result = NamedEntity{Component{std::move(base), symbol}};
- }
- }
- return result;
-}
-
-NamedEntity CoarrayRef::GetBase() const { return AsNamedEntity(base_); }
-
// Equality testing
// For the purposes of comparing type parameter expressions while
@@ -759,9 +731,8 @@ bool ArrayRef::operator==(const ArrayRef &that) const {
return base_ == that.base_ && subscript_ == that.subscript_;
}
bool CoarrayRef::operator==(const CoarrayRef &that) const {
- return base_ == that.base_ && subscript_ == that.subscript_ &&
- cosubscript_ == that.cosubscript_ && stat_ == that.stat_ &&
- team_ == that.team_ && teamIsTeamNumber_ == that.teamIsTeamNumber_;
+ return base_ == that.base_ && cosubscript_ == that.cosubscript_ &&
+ stat_ == that.stat_ && team_ == that.team_;
}
bool DataRef::operator==(const DataRef &that) const {
return TestVariableEquality(*this, that);
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 43375e8..cf9a322 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -4778,7 +4778,14 @@ private:
nbDeviceResidentObject <= 1 &&
"Only one reference to the device resident object is supported");
auto addr = getSymbolAddress(sym);
- hlfir::Entity entity{addr};
+ mlir::Value baseValue;
+ if (auto declareOp =
+ llvm::dyn_cast<hlfir::DeclareOp>(addr.getDefiningOp()))
+ baseValue = declareOp.getBase();
+ else
+ baseValue = addr;
+
+ hlfir::Entity entity{baseValue};
auto [temp, cleanup] =
hlfir::createTempFromMold(loc, builder, entity);
auto needCleanup = fir::getIntIfConstant(cleanup);
diff --git a/flang/lib/Lower/ConvertCall.cpp b/flang/lib/Lower/ConvertCall.cpp
index a5b85e2..d37d51f 100644
--- a/flang/lib/Lower/ConvertCall.cpp
+++ b/flang/lib/Lower/ConvertCall.cpp
@@ -960,9 +960,26 @@ struct CallCleanUp {
mlir::Value tempVar;
mlir::Value mustFree;
};
- void genCleanUp(mlir::Location loc, fir::FirOpBuilder &builder) {
- Fortran::common::visit([&](auto &c) { c.genCleanUp(loc, builder); },
+
+ /// Generate clean-up code.
+ /// If \p postponeAssociates is true, the ExprAssociate clean-up
+ /// is not generated, and instead the corresponding CallCleanUp
+ /// object is returned as the result.
+ std::optional<CallCleanUp> genCleanUp(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ bool postponeAssociates) {
+ std::optional<CallCleanUp> postponed;
+ Fortran::common::visit(Fortran::common::visitors{
+ [&](CopyIn &c) { c.genCleanUp(loc, builder); },
+ [&](ExprAssociate &c) {
+ if (postponeAssociates)
+ postponed = CallCleanUp{c};
+ else
+ c.genCleanUp(loc, builder);
+ },
+ },
cleanUp);
+ return postponed;
}
std::variant<CopyIn, ExprAssociate> cleanUp;
};
@@ -1729,10 +1746,23 @@ genUserCall(Fortran::lower::PreparedActualArguments &loweredActuals,
caller, callSiteType, callContext.resultType,
callContext.isElementalProcWithArrayArgs());
- /// Clean-up associations and copy-in.
- for (auto cleanUp : callCleanUps)
- cleanUp.genCleanUp(loc, builder);
-
+ // Clean-up associations and copy-in.
+ // The association clean-ups are postponed to the end of the statement
+ // lowering. The copy-in clean-ups may be delayed as well,
+ // but they are done immediately after the call currently.
+ llvm::SmallVector<CallCleanUp> associateCleanups;
+ for (auto cleanUp : callCleanUps) {
+ auto postponed =
+ cleanUp.genCleanUp(loc, builder, /*postponeAssociates=*/true);
+ if (postponed)
+ associateCleanups.push_back(*postponed);
+ }
+
+ fir::FirOpBuilder *bldr = &builder;
+ callContext.stmtCtx.attachCleanup([=]() {
+ for (auto cleanUp : associateCleanups)
+ (void)cleanUp.genCleanUp(loc, *bldr, /*postponeAssociates=*/false);
+ });
if (auto *entity = std::get_if<hlfir::EntityWithAttributes>(&loweredResult))
return *entity;
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 2f70041..e191828 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -416,7 +416,8 @@ static inline void genAtomicUpdateStatement(
Fortran::lower::AbstractConverter &converter, mlir::Value lhsAddr,
mlir::Type varType, const Fortran::parser::Variable &assignmentStmtVariable,
const Fortran::parser::Expr &assignmentStmtExpr, mlir::Location loc,
- mlir::Operation *atomicCaptureOp = nullptr) {
+ mlir::Operation *atomicCaptureOp = nullptr,
+ Fortran::lower::StatementContext *atomicCaptureStmtCtx = nullptr) {
// Generate `atomic.update` operation for atomic assignment statements
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
mlir::Location currentLocation = converter.getCurrentLocation();
@@ -496,15 +497,24 @@ static inline void genAtomicUpdateStatement(
},
assignmentStmtExpr.u);
Fortran::lower::StatementContext nonAtomicStmtCtx;
+ Fortran::lower::StatementContext *stmtCtxPtr = &nonAtomicStmtCtx;
if (!nonAtomicSubExprs.empty()) {
// Generate non atomic part before all the atomic operations.
auto insertionPoint = firOpBuilder.saveInsertionPoint();
- if (atomicCaptureOp)
+ if (atomicCaptureOp) {
+ assert(atomicCaptureStmtCtx && "must specify statement context");
firOpBuilder.setInsertionPoint(atomicCaptureOp);
+ // Any clean-ups associated with the expression lowering
+ // must also be generated outside of the atomic update operation
+ // and after the atomic capture operation.
+ // The atomicCaptureStmtCtx will be finalized at the end
+ // of the atomic capture operation generation.
+ stmtCtxPtr = atomicCaptureStmtCtx;
+ }
mlir::Value nonAtomicVal;
for (auto *nonAtomicSubExpr : nonAtomicSubExprs) {
nonAtomicVal = fir::getBase(converter.genExprValue(
- currentLocation, *nonAtomicSubExpr, nonAtomicStmtCtx));
+ currentLocation, *nonAtomicSubExpr, *stmtCtxPtr));
exprValueOverrides.try_emplace(nonAtomicSubExpr, nonAtomicVal);
}
if (atomicCaptureOp)
@@ -652,7 +662,7 @@ void genAtomicCapture(Fortran::lower::AbstractConverter &converter,
genAtomicCaptureStatement(converter, stmt2LHSArg, stmt1LHSArg,
elementType, loc);
genAtomicUpdateStatement(converter, stmt2LHSArg, stmt2VarType, stmt2Var,
- stmt2Expr, loc, atomicCaptureOp);
+ stmt2Expr, loc, atomicCaptureOp, &stmtCtx);
} else {
// Atomic capture construct is of the form [capture-stmt, write-stmt]
firOpBuilder.setInsertionPoint(atomicCaptureOp);
@@ -672,13 +682,15 @@ void genAtomicCapture(Fortran::lower::AbstractConverter &converter,
*Fortran::semantics::GetExpr(stmt2Expr);
mlir::Type elementType = converter.genType(fromExpr);
genAtomicUpdateStatement(converter, stmt1LHSArg, stmt1VarType, stmt1Var,
- stmt1Expr, loc, atomicCaptureOp);
+ stmt1Expr, loc, atomicCaptureOp, &stmtCtx);
genAtomicCaptureStatement(converter, stmt1LHSArg, stmt2LHSArg, elementType,
loc);
}
firOpBuilder.setInsertionPointToEnd(&block);
firOpBuilder.create<mlir::acc::TerminatorOp>(loc);
- firOpBuilder.setInsertionPointToStart(&block);
+ // The clean-ups associated with the statements inside the capture
+ // construct must be generated after the AtomicCaptureOp.
+ firOpBuilder.setInsertionPointAfter(atomicCaptureOp);
}
template <typename Op>
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
index 79b5087..f487625 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
@@ -877,6 +877,26 @@ static bool isVectorSubscript(const evaluate::Expr<T> &expr) {
return false;
}
+bool ClauseProcessor::processDefaultMap(lower::StatementContext &stmtCtx,
+ DefaultMapsTy &result) const {
+ auto process = [&](const omp::clause::Defaultmap &clause,
+ const parser::CharBlock &) {
+ using Defmap = omp::clause::Defaultmap;
+ clause::Defaultmap::VariableCategory variableCategory =
+ Defmap::VariableCategory::All;
+ // Variable Category is optional, if not specified defaults to all.
+ // Multiples of the same category are illegal as are any other
+ // defaultmaps being specified when a user specified all is in place,
+ // however, this should be handled earlier during semantics.
+ if (auto varCat =
+ std::get<std::optional<Defmap::VariableCategory>>(clause.t))
+ variableCategory = varCat.value();
+ auto behaviour = std::get<Defmap::ImplicitBehavior>(clause.t);
+ result[variableCategory] = behaviour;
+ };
+ return findRepeatableClause<omp::clause::Defaultmap>(process);
+}
+
bool ClauseProcessor::processDepend(lower::SymMap &symMap,
lower::StatementContext &stmtCtx,
mlir::omp::DependClauseOps &result) const {
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.h b/flang/lib/Lower/OpenMP/ClauseProcessor.h
index 7857ba3..df398c7 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.h
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.h
@@ -32,6 +32,10 @@ namespace Fortran {
namespace lower {
namespace omp {
+// Container type for tracking user specified Defaultmaps for a target region
+using DefaultMapsTy = std::map<clause::Defaultmap::VariableCategory,
+ clause::Defaultmap::ImplicitBehavior>;
+
/// Class that handles the processing of OpenMP clauses.
///
/// Its `process<ClauseName>()` methods perform MLIR code generation for their
@@ -110,6 +114,8 @@ public:
bool processCopyin() const;
bool processCopyprivate(mlir::Location currentLocation,
mlir::omp::CopyprivateClauseOps &result) const;
+ bool processDefaultMap(lower::StatementContext &stmtCtx,
+ DefaultMapsTy &result) const;
bool processDepend(lower::SymMap &symMap, lower::StatementContext &stmtCtx,
mlir::omp::DependClauseOps &result) const;
bool
diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp
index c258bef..f3088b1 100644
--- a/flang/lib/Lower/OpenMP/Clauses.cpp
+++ b/flang/lib/Lower/OpenMP/Clauses.cpp
@@ -612,7 +612,7 @@ Defaultmap make(const parser::OmpClause::Defaultmap &inp,
MS(Firstprivate, Firstprivate)
MS(None, None)
MS(Default, Default)
- // MS(, Present) missing-in-parser
+ MS(Present, Present)
// clang-format on
);
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 54560729..4909c3e 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -980,6 +980,145 @@ static void genLoopVars(
firOpBuilder.setInsertionPointAfter(storeOp);
}
+static clause::Defaultmap::ImplicitBehavior
+getDefaultmapIfPresent(const DefaultMapsTy &defaultMaps, mlir::Type varType) {
+ using DefMap = clause::Defaultmap;
+
+ if (defaultMaps.empty())
+ return DefMap::ImplicitBehavior::Default;
+
+ if (llvm::is_contained(defaultMaps, DefMap::VariableCategory::All))
+ return defaultMaps.at(DefMap::VariableCategory::All);
+
+ // NOTE: Unsure if complex and/or vector falls into a scalar type
+ // or aggregate, but the current default implicit behaviour is to
+ // treat them as such (c_ptr has its own behaviour, so perhaps
+ // being lumped in as a scalar isn't the right thing).
+ if ((fir::isa_trivial(varType) || fir::isa_char(varType) ||
+ fir::isa_builtin_cptr_type(varType)) &&
+ llvm::is_contained(defaultMaps, DefMap::VariableCategory::Scalar))
+ return defaultMaps.at(DefMap::VariableCategory::Scalar);
+
+ if (fir::isPointerType(varType) &&
+ llvm::is_contained(defaultMaps, DefMap::VariableCategory::Pointer))
+ return defaultMaps.at(DefMap::VariableCategory::Pointer);
+
+ if (fir::isAllocatableType(varType) &&
+ llvm::is_contained(defaultMaps, DefMap::VariableCategory::Allocatable))
+ return defaultMaps.at(DefMap::VariableCategory::Allocatable);
+
+ if (fir::isa_aggregate(varType) &&
+ llvm::is_contained(defaultMaps, DefMap::VariableCategory::Aggregate))
+ return defaultMaps.at(DefMap::VariableCategory::Aggregate);
+
+ return DefMap::ImplicitBehavior::Default;
+}
+
+static std::pair<llvm::omp::OpenMPOffloadMappingFlags,
+ mlir::omp::VariableCaptureKind>
+getImplicitMapTypeAndKind(fir::FirOpBuilder &firOpBuilder,
+ lower::AbstractConverter &converter,
+ const DefaultMapsTy &defaultMaps, mlir::Type varType,
+ mlir::Location loc, const semantics::Symbol &sym) {
+ using DefMap = clause::Defaultmap;
+ // Check if a value of type `type` can be passed to the kernel by value.
+ // All kernel parameters are of pointer type, so if the value can be
+ // represented inside of a pointer, then it can be passed by value.
+ auto isLiteralType = [&](mlir::Type type) {
+ const mlir::DataLayout &dl = firOpBuilder.getDataLayout();
+ mlir::Type ptrTy =
+ mlir::LLVM::LLVMPointerType::get(&converter.getMLIRContext());
+ uint64_t ptrSize = dl.getTypeSize(ptrTy);
+ uint64_t ptrAlign = dl.getTypePreferredAlignment(ptrTy);
+
+ auto [size, align] = fir::getTypeSizeAndAlignmentOrCrash(
+ loc, type, dl, converter.getKindMap());
+ return size <= ptrSize && align <= ptrAlign;
+ };
+
+ llvm::omp::OpenMPOffloadMappingFlags mapFlag =
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
+
+ auto implicitBehaviour = getDefaultmapIfPresent(defaultMaps, varType);
+ if (implicitBehaviour == DefMap::ImplicitBehavior::Default) {
+ mlir::omp::VariableCaptureKind captureKind =
+ mlir::omp::VariableCaptureKind::ByRef;
+
+ // If a variable is specified in declare target link and if device
+ // type is not specified as `nohost`, it needs to be mapped tofrom
+ mlir::ModuleOp mod = firOpBuilder.getModule();
+ mlir::Operation *op = mod.lookupSymbol(converter.mangleName(sym));
+ auto declareTargetOp =
+ llvm::dyn_cast_if_present<mlir::omp::DeclareTargetInterface>(op);
+ if (declareTargetOp && declareTargetOp.isDeclareTarget()) {
+ if (declareTargetOp.getDeclareTargetCaptureClause() ==
+ mlir::omp::DeclareTargetCaptureClause::link &&
+ declareTargetOp.getDeclareTargetDeviceType() !=
+ mlir::omp::DeclareTargetDeviceType::nohost) {
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
+ }
+ } else if (fir::isa_trivial(varType) || fir::isa_char(varType)) {
+ // Scalars behave as if they were "firstprivate".
+ // TODO: Handle objects that are shared/lastprivate or were listed
+ // in an in_reduction clause.
+ if (isLiteralType(varType)) {
+ captureKind = mlir::omp::VariableCaptureKind::ByCopy;
+ } else {
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
+ }
+ } else if (!fir::isa_builtin_cptr_type(varType)) {
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
+ }
+ return std::make_pair(mapFlag, captureKind);
+ }
+
+ switch (implicitBehaviour) {
+ case DefMap::ImplicitBehavior::Alloc:
+ return std::make_pair(llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE,
+ mlir::omp::VariableCaptureKind::ByRef);
+ break;
+ case DefMap::ImplicitBehavior::Firstprivate:
+ case DefMap::ImplicitBehavior::None:
+ TODO(loc, "Firstprivate and None are currently unsupported defaultmap "
+ "behaviour");
+ break;
+ case DefMap::ImplicitBehavior::From:
+ return std::make_pair(mapFlag |=
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM,
+ mlir::omp::VariableCaptureKind::ByRef);
+ break;
+ case DefMap::ImplicitBehavior::Present:
+ return std::make_pair(mapFlag |=
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_PRESENT,
+ mlir::omp::VariableCaptureKind::ByRef);
+ break;
+ case DefMap::ImplicitBehavior::To:
+ return std::make_pair(mapFlag |=
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO,
+ (fir::isa_trivial(varType) || fir::isa_char(varType))
+ ? mlir::omp::VariableCaptureKind::ByCopy
+ : mlir::omp::VariableCaptureKind::ByRef);
+ break;
+ case DefMap::ImplicitBehavior::Tofrom:
+ return std::make_pair(mapFlag |=
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM |
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO,
+ mlir::omp::VariableCaptureKind::ByRef);
+ break;
+ case DefMap::ImplicitBehavior::Default:
+ llvm_unreachable(
+ "Implicit None Behaviour Should Have Been Handled Earlier");
+ break;
+ }
+
+ return std::make_pair(mapFlag |=
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM |
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO,
+ mlir::omp::VariableCaptureKind::ByRef);
+}
+
static void
markDeclareTarget(mlir::Operation *op, lower::AbstractConverter &converter,
mlir::omp::DeclareTargetCaptureClause captureClause,
@@ -1677,11 +1816,13 @@ static void genTargetClauses(
lower::SymMap &symTable, lower::StatementContext &stmtCtx,
lower::pft::Evaluation &eval, const List<Clause> &clauses,
mlir::Location loc, mlir::omp::TargetOperands &clauseOps,
+ DefaultMapsTy &defaultMaps,
llvm::SmallVectorImpl<const semantics::Symbol *> &hasDeviceAddrSyms,
llvm::SmallVectorImpl<const semantics::Symbol *> &isDevicePtrSyms,
llvm::SmallVectorImpl<const semantics::Symbol *> &mapSyms) {
ClauseProcessor cp(converter, semaCtx, clauses);
cp.processBare(clauseOps);
+ cp.processDefaultMap(stmtCtx, defaultMaps);
cp.processDepend(symTable, stmtCtx, clauseOps);
cp.processDevice(stmtCtx, clauseOps);
cp.processHasDeviceAddr(stmtCtx, clauseOps, hasDeviceAddrSyms);
@@ -1696,9 +1837,8 @@ static void genTargetClauses(
cp.processNowait(clauseOps);
cp.processThreadLimit(stmtCtx, clauseOps);
- cp.processTODO<clause::Allocate, clause::Defaultmap, clause::InReduction,
- clause::UsesAllocators>(loc,
- llvm::omp::Directive::OMPD_target);
+ cp.processTODO<clause::Allocate, clause::InReduction, clause::UsesAllocators>(
+ loc, llvm::omp::Directive::OMPD_target);
// `target private(..)` is only supported in delayed privatization mode.
if (!enableDelayedPrivatizationStaging)
@@ -2242,10 +2382,12 @@ genTargetOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
hostEvalInfo.emplace_back();
mlir::omp::TargetOperands clauseOps;
+ DefaultMapsTy defaultMaps;
llvm::SmallVector<const semantics::Symbol *> mapSyms, isDevicePtrSyms,
hasDeviceAddrSyms;
genTargetClauses(converter, semaCtx, symTable, stmtCtx, eval, item->clauses,
- loc, clauseOps, hasDeviceAddrSyms, isDevicePtrSyms, mapSyms);
+ loc, clauseOps, defaultMaps, hasDeviceAddrSyms,
+ isDevicePtrSyms, mapSyms);
DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval,
/*shouldCollectPreDeterminedSymbols=*/
@@ -2253,21 +2395,6 @@ genTargetOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
/*useDelayedPrivatization=*/true, symTable);
dsp.processStep1(&clauseOps);
- // Check if a value of type `type` can be passed to the kernel by value.
- // All kernel parameters are of pointer type, so if the value can be
- // represented inside of a pointer, then it can be passed by value.
- auto isLiteralType = [&](mlir::Type type) {
- const mlir::DataLayout &dl = firOpBuilder.getDataLayout();
- mlir::Type ptrTy =
- mlir::LLVM::LLVMPointerType::get(&converter.getMLIRContext());
- uint64_t ptrSize = dl.getTypeSize(ptrTy);
- uint64_t ptrAlign = dl.getTypePreferredAlignment(ptrTy);
-
- auto [size, align] = fir::getTypeSizeAndAlignmentOrCrash(
- loc, type, dl, converter.getKindMap());
- return size <= ptrSize && align <= ptrAlign;
- };
-
// 5.8.1 Implicit Data-Mapping Attribute Rules
// The following code follows the implicit data-mapping rules to map all the
// symbols used inside the region that do not have explicit data-environment
@@ -2330,56 +2457,25 @@ genTargetOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
firOpBuilder, info, dataExv,
semantics::IsAssumedSizeArray(sym.GetUltimate()),
converter.getCurrentLocation());
-
- llvm::omp::OpenMPOffloadMappingFlags mapFlag =
- llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
- mlir::omp::VariableCaptureKind captureKind =
- mlir::omp::VariableCaptureKind::ByRef;
-
mlir::Value baseOp = info.rawInput;
mlir::Type eleType = baseOp.getType();
if (auto refType = mlir::dyn_cast<fir::ReferenceType>(baseOp.getType()))
eleType = refType.getElementType();
- // If a variable is specified in declare target link and if device
- // type is not specified as `nohost`, it needs to be mapped tofrom
- mlir::ModuleOp mod = firOpBuilder.getModule();
- mlir::Operation *op = mod.lookupSymbol(converter.mangleName(sym));
- auto declareTargetOp =
- llvm::dyn_cast_if_present<mlir::omp::DeclareTargetInterface>(op);
- if (declareTargetOp && declareTargetOp.isDeclareTarget()) {
- if (declareTargetOp.getDeclareTargetCaptureClause() ==
- mlir::omp::DeclareTargetCaptureClause::link &&
- declareTargetOp.getDeclareTargetDeviceType() !=
- mlir::omp::DeclareTargetDeviceType::nohost) {
- mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
- mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
- }
- } else if (fir::isa_trivial(eleType) || fir::isa_char(eleType)) {
- // Scalars behave as if they were "firstprivate".
- // TODO: Handle objects that are shared/lastprivate or were listed
- // in an in_reduction clause.
- if (isLiteralType(eleType)) {
- captureKind = mlir::omp::VariableCaptureKind::ByCopy;
- } else {
- mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
- }
- } else if (!fir::isa_builtin_cptr_type(eleType)) {
- mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
- mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
- }
- auto location =
- mlir::NameLoc::get(mlir::StringAttr::get(firOpBuilder.getContext(),
- sym.name().ToString()),
- baseOp.getLoc());
+ std::pair<llvm::omp::OpenMPOffloadMappingFlags,
+ mlir::omp::VariableCaptureKind>
+ mapFlagAndKind = getImplicitMapTypeAndKind(
+ firOpBuilder, converter, defaultMaps, eleType, loc, sym);
+
mlir::Value mapOp = createMapInfoOp(
- firOpBuilder, location, baseOp, /*varPtrPtr=*/mlir::Value{},
- name.str(), bounds, /*members=*/{},
+ firOpBuilder, converter.getCurrentLocation(), baseOp,
+ /*varPtrPtr=*/mlir::Value{}, name.str(), bounds, /*members=*/{},
/*membersIndex=*/mlir::ArrayAttr{},
static_cast<
std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
- mapFlag),
- captureKind, baseOp.getType(), /*partialMap=*/false, mapperId);
+ std::get<0>(mapFlagAndKind)),
+ std::get<1>(mapFlagAndKind), baseOp.getType(),
+ /*partialMap=*/false, mapperId);
clauseOps.mapVars.push_back(mapOp);
mapSyms.push_back(&sym);
@@ -2720,7 +2816,8 @@ static void genAtomicUpdateStatement(
const parser::Expr &assignmentStmtExpr,
const parser::OmpAtomicClauseList *leftHandClauseList,
const parser::OmpAtomicClauseList *rightHandClauseList, mlir::Location loc,
- mlir::Operation *atomicCaptureOp = nullptr) {
+ mlir::Operation *atomicCaptureOp = nullptr,
+ lower::StatementContext *atomicCaptureStmtCtx = nullptr) {
// Generate `atomic.update` operation for atomic assignment statements
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
mlir::Location currentLocation = converter.getCurrentLocation();
@@ -2794,15 +2891,24 @@ static void genAtomicUpdateStatement(
},
assignmentStmtExpr.u);
lower::StatementContext nonAtomicStmtCtx;
+ lower::StatementContext *stmtCtxPtr = &nonAtomicStmtCtx;
if (!nonAtomicSubExprs.empty()) {
// Generate non atomic part before all the atomic operations.
auto insertionPoint = firOpBuilder.saveInsertionPoint();
- if (atomicCaptureOp)
+ if (atomicCaptureOp) {
+ assert(atomicCaptureStmtCtx && "must specify statement context");
firOpBuilder.setInsertionPoint(atomicCaptureOp);
+ // Any clean-ups associated with the expression lowering
+ // must also be generated outside of the atomic update operation
+ // and after the atomic capture operation.
+ // The atomicCaptureStmtCtx will be finalized at the end
+ // of the atomic capture operation generation.
+ stmtCtxPtr = atomicCaptureStmtCtx;
+ }
mlir::Value nonAtomicVal;
for (auto *nonAtomicSubExpr : nonAtomicSubExprs) {
nonAtomicVal = fir::getBase(converter.genExprValue(
- currentLocation, *nonAtomicSubExpr, nonAtomicStmtCtx));
+ currentLocation, *nonAtomicSubExpr, *stmtCtxPtr));
exprValueOverrides.try_emplace(nonAtomicSubExpr, nonAtomicVal);
}
if (atomicCaptureOp)
@@ -3142,7 +3248,7 @@ static void genAtomicCapture(lower::AbstractConverter &converter,
genAtomicUpdateStatement(
converter, stmt2LHSArg, stmt2VarType, stmt2Var, stmt2Expr,
/*leftHandClauseList=*/nullptr,
- /*rightHandClauseList=*/nullptr, loc, atomicCaptureOp);
+ /*rightHandClauseList=*/nullptr, loc, atomicCaptureOp, &stmtCtx);
} else {
// Atomic capture construct is of the form [capture-stmt, write-stmt]
firOpBuilder.setInsertionPoint(atomicCaptureOp);
@@ -3188,7 +3294,7 @@ static void genAtomicCapture(lower::AbstractConverter &converter,
genAtomicUpdateStatement(
converter, stmt1LHSArg, stmt1VarType, stmt1Var, stmt1Expr,
/*leftHandClauseList=*/nullptr,
- /*rightHandClauseList=*/nullptr, loc, atomicCaptureOp);
+ /*rightHandClauseList=*/nullptr, loc, atomicCaptureOp, &stmtCtx);
if (stmt1VarType != stmt2VarType) {
mlir::Value alloca;
@@ -3220,7 +3326,9 @@ static void genAtomicCapture(lower::AbstractConverter &converter,
}
firOpBuilder.setInsertionPointToEnd(&block);
firOpBuilder.create<mlir::omp::TerminatorOp>(loc);
- firOpBuilder.setInsertionPointToStart(&block);
+ // The clean-ups associated with the statements inside the capture
+ // construct must be generated after the AtomicCaptureOp.
+ firOpBuilder.setInsertionPointAfter(atomicCaptureOp);
}
//===----------------------------------------------------------------------===//
@@ -4199,6 +4307,7 @@ static void genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable,
!std::holds_alternative<clause::Copyin>(clause.u) &&
!std::holds_alternative<clause::Copyprivate>(clause.u) &&
!std::holds_alternative<clause::Default>(clause.u) &&
+ !std::holds_alternative<clause::Defaultmap>(clause.u) &&
!std::holds_alternative<clause::Depend>(clause.u) &&
!std::holds_alternative<clause::Filter>(clause.u) &&
!std::holds_alternative<clause::Final>(clause.u) &&
diff --git a/flang/lib/Lower/Support/Utils.cpp b/flang/lib/Lower/Support/Utils.cpp
index ed2700c..668ee31 100644
--- a/flang/lib/Lower/Support/Utils.cpp
+++ b/flang/lib/Lower/Support/Utils.cpp
@@ -70,18 +70,12 @@ public:
return getHashValue(x.base()) * 89u - subs;
}
static unsigned getHashValue(const Fortran::evaluate::CoarrayRef &x) {
- unsigned subs = 1u;
- for (const Fortran::evaluate::Subscript &v : x.subscript())
- subs -= getHashValue(v);
unsigned cosubs = 3u;
for (const Fortran::evaluate::Expr<Fortran::evaluate::SubscriptInteger> &v :
x.cosubscript())
cosubs -= getHashValue(v);
- unsigned syms = 7u;
- for (const Fortran::evaluate::SymbolRef &v : x.base())
- syms += getHashValue(v);
- return syms * 97u - subs - cosubs + getHashValue(x.stat()) + 257u +
- getHashValue(x.team());
+ return getHashValue(x.base()) * 97u - cosubs + getHashValue(x.stat()) +
+ 257u + getHashValue(x.team());
}
static unsigned getHashValue(const Fortran::evaluate::NamedEntity &x) {
if (x.IsSymbol())
@@ -339,7 +333,6 @@ public:
static bool isEqual(const Fortran::evaluate::CoarrayRef &x,
const Fortran::evaluate::CoarrayRef &y) {
return isEqual(x.base(), y.base()) &&
- isEqual(x.subscript(), y.subscript()) &&
isEqual(x.cosubscript(), y.cosubscript()) &&
isEqual(x.stat(), y.stat()) && isEqual(x.team(), y.team());
}
diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp
index ffee571..0254ac4 100644
--- a/flang/lib/Parser/openmp-parsers.cpp
+++ b/flang/lib/Parser/openmp-parsers.cpp
@@ -705,7 +705,7 @@ TYPE_PARSER(construct<OmpMapClause>(
// [OpenMP 5.0]
// 2.19.7.2 defaultmap(implicit-behavior[:variable-category])
// implicit-behavior -> ALLOC | TO | FROM | TOFROM | FIRSRTPRIVATE | NONE |
-// DEFAULT
+// DEFAULT | PRESENT
// variable-category -> ALL | SCALAR | AGGREGATE | ALLOCATABLE | POINTER
TYPE_PARSER(construct<OmpDefaultmapClause>(
construct<OmpDefaultmapClause::ImplicitBehavior>(
@@ -716,7 +716,8 @@ TYPE_PARSER(construct<OmpDefaultmapClause>(
"FIRSTPRIVATE" >>
pure(OmpDefaultmapClause::ImplicitBehavior::Firstprivate) ||
"NONE" >> pure(OmpDefaultmapClause::ImplicitBehavior::None) ||
- "DEFAULT" >> pure(OmpDefaultmapClause::ImplicitBehavior::Default)),
+ "DEFAULT" >> pure(OmpDefaultmapClause::ImplicitBehavior::Default) ||
+ "PRESENT" >> pure(OmpDefaultmapClause::ImplicitBehavior::Present)),
maybe(":" >> nonemptyList(Parser<OmpDefaultmapClause::Modifier>{}))))
TYPE_PARSER(construct<OmpScheduleClause::Kind>(
diff --git a/flang/lib/Parser/parsing.cpp b/flang/lib/Parser/parsing.cpp
index 17f5441..93737d9 100644
--- a/flang/lib/Parser/parsing.cpp
+++ b/flang/lib/Parser/parsing.cpp
@@ -230,10 +230,11 @@ void Parsing::EmitPreprocessedSource(
column = 7; // start of fixed form source field
++sourceLine;
inContinuation = true;
- } else if (!inDirective && ch != ' ' && (ch < '0' || ch > '9')) {
+ } else if (!inDirective && !ompConditionalLine && ch != ' ' &&
+ (ch < '0' || ch > '9')) {
// Put anything other than a label or directive into the
// Fortran fixed form source field (columns [7:72]).
- for (; column < 7; ++column) {
+ for (int toCol{ch == '&' ? 6 : 7}; column < toCol; ++column) {
out << ' ';
}
}
@@ -241,7 +242,7 @@ void Parsing::EmitPreprocessedSource(
if (ompConditionalLine) {
// Only digits can stay in the label field
if (!(ch >= '0' && ch <= '9')) {
- for (; column < 7; ++column) {
+ for (int toCol{ch == '&' ? 6 : 7}; column < toCol; ++column) {
out << ' ';
}
}
diff --git a/flang/lib/Parser/prescan.cpp b/flang/lib/Parser/prescan.cpp
index 46e04c1..3bc2ea0 100644
--- a/flang/lib/Parser/prescan.cpp
+++ b/flang/lib/Parser/prescan.cpp
@@ -150,10 +150,7 @@ void Prescanner::Statement() {
CHECK(*at_ == '!');
}
std::optional<int> condOffset;
- bool isOpenMPCondCompilation{
- directiveSentinel_[0] == '$' && directiveSentinel_[1] == '\0'};
- if (isOpenMPCondCompilation) {
- // OpenMP conditional compilation line.
+ if (InOpenMPConditionalLine()) {
condOffset = 2;
} else if (directiveSentinel_[0] == '@' && directiveSentinel_[1] == 'c' &&
directiveSentinel_[2] == 'u' && directiveSentinel_[3] == 'f' &&
@@ -167,19 +164,10 @@ void Prescanner::Statement() {
FortranInclude(at_ + *payload);
return;
}
- while (true) {
- if (auto n{IsSpace(at_)}) {
- at_ += n, ++column_;
- } else if (*at_ == '\t') {
- ++at_, ++column_;
- tabInCurrentLine_ = true;
- } else if (inFixedForm_ && column_ == 6 && !tabInCurrentLine_ &&
- *at_ == '0') {
- ++at_, ++column_;
- } else {
- break;
- }
+ if (inFixedForm_) {
+ LabelField(tokens);
}
+ SkipSpaces();
} else {
// Compiler directive. Emit normalized sentinel, squash following spaces.
// Conditional compilation lines (!$) take this path in -E mode too
@@ -190,35 +178,47 @@ void Prescanner::Statement() {
++sp, ++at_, ++column_) {
EmitChar(tokens, *sp);
}
- if (IsSpaceOrTab(at_)) {
- while (int n{IsSpaceOrTab(at_)}) {
- if (isOpenMPCondCompilation && inFixedForm_) {
+ if (inFixedForm_) {
+ while (column_ < 6) {
+ if (*at_ == '\t') {
+ tabInCurrentLine_ = true;
+ ++at_;
+ for (; column_ < 7; ++column_) {
+ EmitChar(tokens, ' ');
+ }
+ } else if (int spaceBytes{IsSpace(at_)}) {
EmitChar(tokens, ' ');
- }
- tabInCurrentLine_ |= *at_ == '\t';
- at_ += n, ++column_;
- if (inFixedForm_ && column_ > fixedFormColumnLimit_) {
+ at_ += spaceBytes;
+ ++column_;
+ } else {
+ if (InOpenMPConditionalLine() && column_ == 3 &&
+ IsDecimalDigit(*at_)) {
+ // subtle: !$ in -E mode can't be immediately followed by a digit
+ EmitChar(tokens, ' ');
+ }
break;
}
}
- if (isOpenMPCondCompilation && inFixedForm_ && column_ == 6) {
- if (*at_ == '0') {
- EmitChar(tokens, ' ');
- } else {
- tokens.CloseToken();
- EmitChar(tokens, '&');
- }
- ++at_, ++column_;
+ } else if (int spaceBytes{IsSpaceOrTab(at_)}) {
+ EmitChar(tokens, ' ');
+ at_ += spaceBytes, ++column_;
+ }
+ tokens.CloseToken();
+ SkipSpaces();
+ if (InOpenMPConditionalLine() && inFixedForm_ && !tabInCurrentLine_ &&
+ column_ == 6 && *at_ != '\n') {
+ // !$ 0 - turn '0' into a space
+ // !$ 1 - turn '1' into '&'
+ if (int n{IsSpace(at_)}; n || *at_ == '0') {
+ at_ += n ? n : 1;
} else {
- EmitChar(tokens, ' ');
+ ++at_;
+ EmitChar(tokens, '&');
+ tokens.CloseToken();
}
+ ++column_;
+ SkipSpaces();
}
- tokens.CloseToken();
- }
- if (*at_ == '!' || *at_ == '\n' ||
- (inFixedForm_ && column_ > fixedFormColumnLimit_ &&
- !tabInCurrentLine_)) {
- return; // Directive without payload
}
break;
}
@@ -323,8 +323,8 @@ void Prescanner::Statement() {
NormalizeCompilerDirectiveCommentMarker(*preprocessed);
preprocessed->ToLowerCase();
SourceFormChange(preprocessed->ToString());
- CheckAndEmitLine(preprocessed->ToLowerCase().ClipComment(
- *this, true /* skip first ! */),
+ CheckAndEmitLine(
+ preprocessed->ClipComment(*this, true /* skip first ! */),
newlineProvenance);
break;
case LineClassification::Kind::Source:
@@ -349,6 +349,24 @@ void Prescanner::Statement() {
while (CompilerDirectiveContinuation(tokens, line.sentinel)) {
newlineProvenance = GetCurrentProvenance();
}
+ if (preprocessingOnly_ && inFixedForm_ && InOpenMPConditionalLine() &&
+ nextLine_ < limit_) {
+ // In -E mode, when the line after !$ conditional compilation is a
+ // regular fixed form continuation line, append a '&' to the line.
+ const char *p{nextLine_};
+ int col{1};
+ while (int n{IsSpace(p)}) {
+ if (*p == '\t') {
+ break;
+ }
+ p += n;
+ ++col;
+ }
+ if (col == 6 && *p != '0' && *p != '\t' && *p != '\n') {
+ EmitChar(tokens, '&');
+ tokens.CloseToken();
+ }
+ }
tokens.ToLowerCase();
SourceFormChange(tokens.ToString());
} else { // Kind::Source
@@ -544,7 +562,8 @@ void Prescanner::SkipToEndOfLine() {
bool Prescanner::MustSkipToEndOfLine() const {
if (inFixedForm_ && column_ > fixedFormColumnLimit_ && !tabInCurrentLine_) {
return true; // skip over ignored columns in right margin (73:80)
- } else if (*at_ == '!' && !inCharLiteral_) {
+ } else if (*at_ == '!' && !inCharLiteral_ &&
+ (!inFixedForm_ || tabInCurrentLine_ || column_ != 6)) {
return !IsCompilerDirectiveSentinel(at_);
} else {
return false;
@@ -569,10 +588,11 @@ void Prescanner::NextChar() {
// directives, Fortran ! comments, stuff after the right margin in
// fixed form, and all forms of line continuation.
bool Prescanner::SkipToNextSignificantCharacter() {
- auto anyContinuationLine{false};
if (inPreprocessorDirective_) {
SkipCComments();
+ return false;
} else {
+ auto anyContinuationLine{false};
bool mightNeedSpace{false};
if (MustSkipToEndOfLine()) {
SkipToEndOfLine();
@@ -589,8 +609,8 @@ bool Prescanner::SkipToNextSignificantCharacter() {
if (*at_ == '\t') {
tabInCurrentLine_ = true;
}
+ return anyContinuationLine;
}
- return anyContinuationLine;
}
void Prescanner::SkipCComments() {
@@ -1119,12 +1139,10 @@ static bool IsAtProcess(const char *p) {
bool Prescanner::IsFixedFormCommentLine(const char *start) const {
const char *p{start};
-
// The @process directive must start in column 1.
if (*p == '@' && IsAtProcess(p)) {
return true;
}
-
if (IsFixedFormCommentChar(*p) || *p == '%' || // VAX %list, %eject, &c.
((*p == 'D' || *p == 'd') &&
!features_.IsEnabled(LanguageFeature::OldDebugLines))) {
@@ -1324,24 +1342,11 @@ const char *Prescanner::FixedFormContinuationLine(bool mightNeedSpace) {
features_.IsEnabled(LanguageFeature::OldDebugLines))) &&
nextLine_[1] == ' ' && nextLine_[2] == ' ' && nextLine_[3] == ' ' &&
nextLine_[4] == ' '};
- if (InCompilerDirective()) {
- if (directiveSentinel_[0] == '$' && directiveSentinel_[1] == '\0') {
- if (IsFixedFormCommentChar(col1)) {
- if (nextLine_[1] == '$' &&
- (nextLine_[2] == '&' || IsSpaceOrTab(&nextLine_[2]))) {
- // Next line is also !$ conditional compilation, might be continuation
- if (preprocessingOnly_) {
- return nullptr;
- }
- } else {
- return nullptr; // comment, or distinct directive
- }
- } else if (!canBeNonDirectiveContinuation) {
- return nullptr;
- }
- } else if (!IsFixedFormCommentChar(col1)) {
- return nullptr; // in directive other than !$, but next line is not
- } else { // in directive other than !$, next line might be continuation
+ if (InCompilerDirective() &&
+ !(InOpenMPConditionalLine() && !preprocessingOnly_)) {
+ // !$ under -E is not continued, but deferred to later compilation
+ if (IsFixedFormCommentChar(col1) &&
+ !(InOpenMPConditionalLine() && preprocessingOnly_)) {
int j{1};
for (; j < 5; ++j) {
char ch{directiveSentinel_[j - 1]};
@@ -1356,31 +1361,27 @@ const char *Prescanner::FixedFormContinuationLine(bool mightNeedSpace) {
return nullptr;
}
}
- }
- const char *col6{nextLine_ + 5};
- if (*col6 != '\n' && *col6 != '0' && !IsSpaceOrTab(col6)) {
- if (mightNeedSpace && !IsSpace(nextLine_ + 6)) {
- insertASpace_ = true;
+ const char *col6{nextLine_ + 5};
+ if (*col6 != '\n' && *col6 != '0' && !IsSpaceOrTab(col6)) {
+ if (mightNeedSpace && !IsSpace(nextLine_ + 6)) {
+ insertASpace_ = true;
+ }
+ return nextLine_ + 6;
}
- return nextLine_ + 6;
}
- } else {
- // Normal case: not in a compiler directive.
- if (IsFixedFormCommentChar(col1)) {
- if (nextLine_[1] == '$' && nextLine_[2] == ' ' && nextLine_[3] == ' ' &&
- nextLine_[4] == ' ' &&
- IsCompilerDirectiveSentinel(&nextLine_[1], 1) &&
- !preprocessingOnly_) {
- // !$ conditional compilation line as a continuation
- const char *col6{nextLine_ + 5};
- if (*col6 != '\n' && *col6 != '0' && !IsSpaceOrTab(col6)) {
- if (mightNeedSpace && !IsSpace(nextLine_ + 6)) {
- insertASpace_ = true;
- }
- return nextLine_ + 6;
- }
+ } else { // Normal case: not in a compiler directive.
+ // !$ conditional compilation lines may be continuations when not
+ // just preprocessing.
+ if (!preprocessingOnly_ && IsFixedFormCommentChar(col1) &&
+ nextLine_[1] == '$' && nextLine_[2] == ' ' && nextLine_[3] == ' ' &&
+ nextLine_[4] == ' ' && IsCompilerDirectiveSentinel(&nextLine_[1], 1)) {
+ if (const char *col6{nextLine_ + 5};
+ *col6 != '\n' && *col6 != '0' && !IsSpaceOrTab(col6)) {
+ insertASpace_ |= mightNeedSpace && !IsSpace(nextLine_ + 6);
+ return nextLine_ + 6;
+ } else {
+ return nullptr;
}
- return nullptr;
}
if (col1 == '&' &&
features_.IsEnabled(
@@ -1422,13 +1423,13 @@ const char *Prescanner::FreeFormContinuationLine(bool ampersand) {
}
p = SkipWhiteSpaceIncludingEmptyMacros(p);
if (InCompilerDirective()) {
- if (directiveSentinel_[0] == '$' && directiveSentinel_[1] == '\0') {
+ if (InOpenMPConditionalLine()) {
if (preprocessingOnly_) {
// in -E mode, don't treat !$ as a continuation
return nullptr;
} else if (p[0] == '!' && p[1] == '$') {
// accept but do not require a matching sentinel
- if (!(p[2] == '&' || IsSpaceOrTab(&p[2]))) {
+ if (p[2] != '&' && !IsSpaceOrTab(&p[2])) {
return nullptr; // not !$
}
p += 2;
@@ -1566,15 +1567,11 @@ Prescanner::IsFixedFormCompilerDirectiveLine(const char *start) const {
}
char sentinel[5], *sp{sentinel};
int column{2};
- for (; column < 6; ++column, ++p) {
- if (*p == '\n' || IsSpaceOrTab(p)) {
- break;
- }
- if (sp == sentinel + 1 && sentinel[0] == '$' && IsDecimalDigit(*p)) {
- // OpenMP conditional compilation line: leave the label alone
+ for (; column < 6; ++column) {
+ if (*p == '\n' || IsSpaceOrTab(p) || IsDecimalDigit(*p)) {
break;
}
- *sp++ = ToLowerCaseLetter(*p);
+ *sp++ = ToLowerCaseLetter(*p++);
}
if (sp == sentinel) {
return std::nullopt;
@@ -1600,7 +1597,8 @@ Prescanner::IsFixedFormCompilerDirectiveLine(const char *start) const {
++p;
} else if (int n{IsSpaceOrTab(p)}) {
p += n;
- } else if (isOpenMPConditional && preprocessingOnly_ && !hadDigit) {
+ } else if (isOpenMPConditional && preprocessingOnly_ && !hadDigit &&
+ *p != '\n') {
// In -E mode, "!$ &" is treated as a directive
} else {
// This is a Continuation line, not an initial directive line.
@@ -1671,14 +1669,14 @@ const char *Prescanner::IsCompilerDirectiveSentinel(CharBlock token) const {
std::optional<std::pair<const char *, const char *>>
Prescanner::IsCompilerDirectiveSentinel(const char *p) const {
char sentinel[8];
- for (std::size_t j{0}; j + 1 < sizeof sentinel && *p != '\n'; ++p, ++j) {
+ for (std::size_t j{0}; j + 1 < sizeof sentinel; ++p, ++j) {
if (int n{IsSpaceOrTab(p)};
n || !(IsLetter(*p) || *p == '$' || *p == '@')) {
if (j > 0) {
- if (j == 1 && sentinel[0] == '$' && n == 0 && *p != '&') {
- // OpenMP conditional compilation line sentinels have to
+ if (j == 1 && sentinel[0] == '$' && n == 0 && *p != '&' && *p != '\n') {
+ // Free form OpenMP conditional compilation line sentinels have to
// be immediately followed by a space or &, not a digit
- // or anything else.
+ // or anything else. A newline also works for an initial line.
break;
}
sentinel[j] = '\0';
diff --git a/flang/lib/Parser/prescan.h b/flang/lib/Parser/prescan.h
index 53361ba..ec4c53c 100644
--- a/flang/lib/Parser/prescan.h
+++ b/flang/lib/Parser/prescan.h
@@ -159,6 +159,11 @@ private:
}
bool InCompilerDirective() const { return directiveSentinel_ != nullptr; }
+ bool InOpenMPConditionalLine() const {
+ return directiveSentinel_ && directiveSentinel_[0] == '$' &&
+ !directiveSentinel_[1];
+ ;
+ }
bool InFixedFormSource() const {
return inFixedForm_ && !inPreprocessorDirective_ && !InCompilerDirective();
}
diff --git a/flang/lib/Parser/token-sequence.cpp b/flang/lib/Parser/token-sequence.cpp
index aee7693..40a074e 100644
--- a/flang/lib/Parser/token-sequence.cpp
+++ b/flang/lib/Parser/token-sequence.cpp
@@ -357,7 +357,7 @@ ProvenanceRange TokenSequence::GetProvenanceRange() const {
const TokenSequence &TokenSequence::CheckBadFortranCharacters(
Messages &messages, const Prescanner &prescanner,
- bool allowAmpersand) const {
+ bool preprocessingOnly) const {
std::size_t tokens{SizeInTokens()};
for (std::size_t j{0}; j < tokens; ++j) {
CharBlock token{TokenAt(j)};
@@ -371,8 +371,10 @@ const TokenSequence &TokenSequence::CheckBadFortranCharacters(
TokenAt(j + 1))) { // !dir$, &c.
++j;
continue;
+ } else if (preprocessingOnly) {
+ continue;
}
- } else if (ch == '&' && allowAmpersand) {
+ } else if (ch == '&' && preprocessingOnly) {
continue;
}
if (ch < ' ' || ch >= '\x7f') {
diff --git a/flang/lib/Semantics/check-allocate.cpp b/flang/lib/Semantics/check-allocate.cpp
index b426dd8..2c215f4 100644
--- a/flang/lib/Semantics/check-allocate.cpp
+++ b/flang/lib/Semantics/check-allocate.cpp
@@ -116,13 +116,19 @@ static std::optional<AllocateCheckerInfo> CheckAllocateOptions(
// C937
if (auto it{FindCoarrayUltimateComponent(*derived)}) {
context
- .Say("Type-spec in ALLOCATE must not specify a type with a coarray"
- " ultimate component"_err_en_US)
+ .Say(
+ "Type-spec in ALLOCATE must not specify a type with a coarray ultimate component"_err_en_US)
.Attach(it->name(),
"Type '%s' has coarray ultimate component '%s' declared here"_en_US,
info.typeSpec->AsFortran(), it.BuildResultDesignatorName());
}
}
+ if (auto dyType{evaluate::DynamicType::From(*info.typeSpec)}) {
+ if (dyType->HasDeferredTypeParameter()) {
+ context.Say(
+ "Type-spec in ALLOCATE must not have a deferred type parameter"_err_en_US);
+ }
+ }
}
const parser::Expr *parserSourceExpr{nullptr};
diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp
index 1192886..3cf95fd 100644
--- a/flang/lib/Semantics/check-call.cpp
+++ b/flang/lib/Semantics/check-call.cpp
@@ -581,20 +581,38 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
"Polymorphic scalar may not be associated with a %s array"_err_en_US,
dummyName);
}
+ bool isOkBecauseContiguous{
+ context.IsEnabled(
+ common::LanguageFeature::ContiguousOkForSeqAssociation) &&
+ actualLastSymbol &&
+ evaluate::IsContiguous(*actualLastSymbol, foldingContext)};
if (actualIsArrayElement && actualLastSymbol &&
- !evaluate::IsContiguous(*actualLastSymbol, foldingContext) &&
!dummy.ignoreTKR.test(common::IgnoreTKR::Contiguous)) {
if (IsPointer(*actualLastSymbol)) {
- basicError = true;
- messages.Say(
- "Element of pointer array may not be associated with a %s array"_err_en_US,
- dummyName);
+ if (isOkBecauseContiguous) {
+ context.Warn(
+ common::LanguageFeature::ContiguousOkForSeqAssociation,
+ messages.at(),
+ "Element of contiguous pointer array is accepted for storage sequence association"_port_en_US);
+ } else {
+ basicError = true;
+ messages.Say(
+ "Element of pointer array may not be associated with a %s array"_err_en_US,
+ dummyName);
+ }
} else if (IsAssumedShape(*actualLastSymbol) &&
!dummy.ignoreTKR.test(common::IgnoreTKR::Contiguous)) {
- basicError = true;
- messages.Say(
- "Element of assumed-shape array may not be associated with a %s array"_err_en_US,
- dummyName);
+ if (isOkBecauseContiguous) {
+ context.Warn(
+ common::LanguageFeature::ContiguousOkForSeqAssociation,
+ messages.at(),
+ "Element of contiguous assumed-shape array is accepted for storage sequence association"_port_en_US);
+ } else {
+ basicError = true;
+ messages.Say(
+ "Element of assumed-shape array may not be associated with a %s array"_err_en_US,
+ dummyName);
+ }
}
}
}
@@ -754,12 +772,13 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
}
}
- // Cases when temporaries might be needed but must not be permitted.
+ bool dummyIsContiguous{
+ dummy.attrs.test(characteristics::DummyDataObject::Attr::Contiguous)};
bool actualIsContiguous{IsSimplyContiguous(actual, foldingContext)};
+
+ // Cases when temporaries might be needed but must not be permitted.
bool dummyIsAssumedShape{dummy.type.attrs().test(
characteristics::TypeAndShape::Attr::AssumedShape)};
- bool dummyIsContiguous{
- dummy.attrs.test(characteristics::DummyDataObject::Attr::Contiguous)};
if ((actualIsAsynchronous || actualIsVolatile) &&
(dummyIsAsynchronous || dummyIsVolatile) && !dummyIsValue) {
if (actualCoarrayRef) { // C1538
@@ -834,7 +853,7 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
if (scope) {
semantics::CheckPointerAssignment(context, messages.at(), dummyName,
dummy, actual, *scope,
- /*isAssumedRank=*/dummyIsAssumedRank);
+ /*isAssumedRank=*/dummyIsAssumedRank, actualIsPointer);
}
} else if (!actualIsPointer) {
messages.Say(
diff --git a/flang/lib/Semantics/check-coarray.cpp b/flang/lib/Semantics/check-coarray.cpp
index b21e3cd..0e444f1 100644
--- a/flang/lib/Semantics/check-coarray.cpp
+++ b/flang/lib/Semantics/check-coarray.cpp
@@ -373,41 +373,12 @@ void CoarrayChecker::Leave(const parser::CriticalStmt &x) {
}
void CoarrayChecker::Leave(const parser::ImageSelector &imageSelector) {
- haveStat_ = false;
- haveTeam_ = false;
- haveTeamNumber_ = false;
for (const auto &imageSelectorSpec :
std::get<std::list<parser::ImageSelectorSpec>>(imageSelector.t)) {
- if (const auto *team{
- std::get_if<parser::TeamValue>(&imageSelectorSpec.u)}) {
- if (haveTeam_) {
- context_.Say(parser::FindSourceLocation(imageSelectorSpec), // C929
- "TEAM value can only be specified once"_err_en_US);
- }
- CheckTeamType(context_, *team);
- haveTeam_ = true;
- }
if (const auto *stat{std::get_if<parser::ImageSelectorSpec::Stat>(
&imageSelectorSpec.u)}) {
- if (haveStat_) {
- context_.Say(parser::FindSourceLocation(imageSelectorSpec), // C929
- "STAT variable can only be specified once"_err_en_US);
- }
CheckTeamStat(context_, *stat);
- haveStat_ = true;
}
- if (std::get_if<parser::ImageSelectorSpec::Team_Number>(
- &imageSelectorSpec.u)) {
- if (haveTeamNumber_) {
- context_.Say(parser::FindSourceLocation(imageSelectorSpec), // C929
- "TEAM_NUMBER value can only be specified once"_err_en_US);
- }
- haveTeamNumber_ = true;
- }
- }
- if (haveTeam_ && haveTeamNumber_) {
- context_.Say(parser::FindSourceLocation(imageSelector), // C930
- "Cannot specify both TEAM and TEAM_NUMBER"_err_en_US);
}
}
diff --git a/flang/lib/Semantics/check-coarray.h b/flang/lib/Semantics/check-coarray.h
index f156959..51de47f 100644
--- a/flang/lib/Semantics/check-coarray.h
+++ b/flang/lib/Semantics/check-coarray.h
@@ -37,9 +37,6 @@ public:
private:
SemanticsContext &context_;
- bool haveStat_;
- bool haveTeam_;
- bool haveTeamNumber_;
void CheckNamesAreDistinct(const std::list<parser::CoarrayAssociation> &);
void Say2(const parser::CharBlock &, parser::MessageFixedText &&,
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index 3180855..a86f781 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -1192,7 +1192,7 @@ void CheckHelper::CheckObjectEntity(
typeName);
} else if (evaluate::IsAssumedRank(symbol)) {
SayWithDeclaration(symbol,
- "Assumed Rank entity of %s type is not supported"_err_en_US,
+ "Assumed rank entity of %s type is not supported"_err_en_US,
typeName);
}
}
@@ -2555,6 +2555,9 @@ void CheckHelper::CheckProcBinding(
const Symbol &symbol, const ProcBindingDetails &binding) {
const Scope &dtScope{symbol.owner()};
CHECK(dtScope.kind() == Scope::Kind::DerivedType);
+ bool isInaccessibleDeferred{false};
+ const Symbol *overridden{
+ FindOverriddenBinding(symbol, isInaccessibleDeferred)};
if (symbol.attrs().test(Attr::DEFERRED)) {
if (const Symbol *dtSymbol{dtScope.symbol()}) {
if (!dtSymbol->attrs().test(Attr::ABSTRACT)) { // C733
@@ -2568,6 +2571,11 @@ void CheckHelper::CheckProcBinding(
"Type-bound procedure '%s' may not be both DEFERRED and NON_OVERRIDABLE"_err_en_US,
symbol.name());
}
+ if (overridden && !overridden->attrs().test(Attr::DEFERRED)) {
+ SayWithDeclaration(*overridden,
+ "Override of non-DEFERRED '%s' must not be DEFERRED"_err_en_US,
+ symbol.name());
+ }
}
if (binding.symbol().attrs().test(Attr::INTRINSIC) &&
!context_.intrinsics().IsSpecificIntrinsicFunction(
@@ -2576,9 +2584,7 @@ void CheckHelper::CheckProcBinding(
"Intrinsic procedure '%s' is not a specific intrinsic permitted for use in the definition of binding '%s'"_err_en_US,
binding.symbol().name(), symbol.name());
}
- bool isInaccessibleDeferred{false};
- if (const Symbol *
- overridden{FindOverriddenBinding(symbol, isInaccessibleDeferred)}) {
+ if (overridden) {
if (isInaccessibleDeferred) {
SayWithDeclaration(*overridden,
"Override of PRIVATE DEFERRED '%s' must appear in its module"_err_en_US,
@@ -3414,7 +3420,13 @@ void CheckHelper::CheckBindC(const Symbol &symbol) {
bool CheckHelper::CheckDioDummyIsData(
const Symbol &subp, const Symbol *arg, std::size_t position) {
if (arg && arg->detailsIf<ObjectEntityDetails>()) {
- return true;
+ if (evaluate::IsAssumedRank(*arg)) {
+ messages_.Say(arg->name(),
+ "Dummy argument '%s' may not be assumed-rank"_err_en_US, arg->name());
+ return false;
+ } else {
+ return true;
+ }
} else {
if (arg) {
messages_.Say(arg->name(),
@@ -3592,9 +3604,10 @@ void CheckHelper::CheckDioVlistArg(
CheckDioDummyIsDefaultInteger(subp, *arg);
CheckDioDummyAttrs(subp, *arg, Attr::INTENT_IN);
const auto *objectDetails{arg->detailsIf<ObjectEntityDetails>()};
- if (!objectDetails || !objectDetails->shape().CanBeAssumedShape()) {
+ if (!objectDetails || !objectDetails->shape().CanBeAssumedShape() ||
+ objectDetails->shape().Rank() != 1) {
messages_.Say(arg->name(),
- "Dummy argument '%s' of a defined input/output procedure must be assumed shape"_err_en_US,
+ "Dummy argument '%s' of a defined input/output procedure must be assumed shape vector"_err_en_US,
arg->name());
}
}
diff --git a/flang/lib/Semantics/dump-expr.cpp b/flang/lib/Semantics/dump-expr.cpp
index 850904b..aa0b4e0 100644
--- a/flang/lib/Semantics/dump-expr.cpp
+++ b/flang/lib/Semantics/dump-expr.cpp
@@ -22,7 +22,6 @@ inline const char *DumpEvaluateExpr::GetIndentString() const {
void DumpEvaluateExpr::Show(const evaluate::CoarrayRef &x) {
Indent("coarray ref");
Show(x.base());
- Show(x.subscript());
Show(x.cosubscript());
Show(x.stat());
Show(x.team());
diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp
index e139bda..0659536 100644
--- a/flang/lib/Semantics/expression.cpp
+++ b/flang/lib/Semantics/expression.cpp
@@ -419,13 +419,9 @@ static void CheckSubscripts(
}
}
-static void CheckSubscripts(
+static void CheckCosubscripts(
semantics::SemanticsContext &context, CoarrayRef &ref) {
- const Symbol &coarraySymbol{ref.GetBase().GetLastSymbol()};
- Shape lb, ub;
- if (FoldSubscripts(context, coarraySymbol, ref.subscript(), lb, ub)) {
- ValidateSubscripts(context, coarraySymbol, ref.subscript(), lb, ub);
- }
+ const Symbol &coarraySymbol{ref.GetLastSymbol()};
FoldingContext &foldingContext{context.foldingContext()};
int dim{0};
for (auto &expr : ref.cosubscript()) {
@@ -1534,29 +1530,10 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::StructureComponent &sc) {
}
MaybeExpr ExpressionAnalyzer::Analyze(const parser::CoindexedNamedObject &x) {
- if (auto maybeDataRef{ExtractDataRef(Analyze(x.base))}) {
- DataRef *dataRef{&*maybeDataRef};
- std::vector<Subscript> subscripts;
- SymbolVector reversed;
- if (auto *aRef{std::get_if<ArrayRef>(&dataRef->u)}) {
- subscripts = std::move(aRef->subscript());
- reversed.push_back(aRef->GetLastSymbol());
- if (Component *component{aRef->base().UnwrapComponent()}) {
- dataRef = &component->base();
- } else {
- dataRef = nullptr;
- }
- }
- if (dataRef) {
- while (auto *component{std::get_if<Component>(&dataRef->u)}) {
- reversed.push_back(component->GetLastSymbol());
- dataRef = &component->base();
- }
- if (auto *baseSym{std::get_if<SymbolRef>(&dataRef->u)}) {
- reversed.push_back(*baseSym);
- } else {
- Say("Base of coindexed named object has subscripts or cosubscripts"_err_en_US);
- }
+ if (auto dataRef{ExtractDataRef(Analyze(x.base))}) {
+ if (!std::holds_alternative<ArrayRef>(dataRef->u) &&
+ dataRef->GetLastSymbol().Rank() > 0) { // F'2023 C916
+ Say("Subscripts must appear in a coindexed reference when its base is an array"_err_en_US);
}
std::vector<Expr<SubscriptInteger>> cosubscripts;
bool cosubsOk{true};
@@ -1570,30 +1547,59 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::CoindexedNamedObject &x) {
cosubsOk = false;
}
}
- if (cosubsOk && !reversed.empty()) {
+ if (cosubsOk) {
int numCosubscripts{static_cast<int>(cosubscripts.size())};
- const Symbol &symbol{reversed.front()};
+ const Symbol &symbol{dataRef->GetLastSymbol()};
if (numCosubscripts != GetCorank(symbol)) {
Say("'%s' has corank %d, but coindexed reference has %d cosubscripts"_err_en_US,
symbol.name(), GetCorank(symbol), numCosubscripts);
}
}
+ CoarrayRef coarrayRef{std::move(*dataRef), std::move(cosubscripts)};
for (const auto &imageSelSpec :
std::get<std::list<parser::ImageSelectorSpec>>(x.imageSelector.t)) {
common::visit(
common::visitors{
- [&](const auto &x) { Analyze(x.v); },
- },
+ [&](const parser::ImageSelectorSpec::Stat &x) {
+ Analyze(x.v);
+ if (const auto *expr{GetExpr(context_, x.v)}) {
+ if (const auto *intExpr{
+ std::get_if<Expr<SomeInteger>>(&expr->u)}) {
+ if (coarrayRef.stat()) {
+ Say("coindexed reference has multiple STAT= specifiers"_err_en_US);
+ } else {
+ coarrayRef.set_stat(Expr<SomeInteger>{*intExpr});
+ }
+ }
+ }
+ },
+ [&](const parser::TeamValue &x) {
+ Analyze(x.v);
+ if (const auto *expr{GetExpr(context_, x.v)}) {
+ if (coarrayRef.team()) {
+ Say("coindexed reference has multiple TEAM= or TEAM_NUMBER= specifiers"_err_en_US);
+ } else if (auto dyType{expr->GetType()};
+ dyType && IsTeamType(GetDerivedTypeSpec(*dyType))) {
+ coarrayRef.set_team(Expr<SomeType>{*expr});
+ } else {
+ Say("TEAM= specifier must have type TEAM_TYPE from ISO_FORTRAN_ENV"_err_en_US);
+ }
+ }
+ },
+ [&](const parser::ImageSelectorSpec::Team_Number &x) {
+ Analyze(x.v);
+ if (const auto *expr{GetExpr(context_, x.v)}) {
+ if (coarrayRef.team()) {
+ Say("coindexed reference has multiple TEAM= or TEAM_NUMBER= specifiers"_err_en_US);
+ } else {
+ coarrayRef.set_team(Expr<SomeType>{*expr});
+ }
+ }
+ }},
imageSelSpec.u);
}
- // Reverse the chain of symbols so that the base is first and coarray
- // ultimate component is last.
- if (cosubsOk) {
- CoarrayRef coarrayRef{SymbolVector{reversed.crbegin(), reversed.crend()},
- std::move(subscripts), std::move(cosubscripts)};
- CheckSubscripts(context_, coarrayRef);
- return Designate(DataRef{std::move(coarrayRef)});
- }
+ CheckCosubscripts(context_, coarrayRef);
+ return Designate(DataRef{std::move(coarrayRef)});
}
return std::nullopt;
}
diff --git a/flang/lib/Semantics/mod-file.cpp b/flang/lib/Semantics/mod-file.cpp
index 3ea37ce..a1ec956 100644
--- a/flang/lib/Semantics/mod-file.cpp
+++ b/flang/lib/Semantics/mod-file.cpp
@@ -1548,6 +1548,7 @@ Scope *ModFileReader::Read(SourceName name, std::optional<bool> isIntrinsic,
// created under -fhermetic-module-files? If so, process them first in
// their own nested scope that will be visible only to USE statements
// within the module file.
+ Scope *previousHermetic{context_.currentHermeticModuleFileScope()};
if (parseTree.v.size() > 1) {
parser::Program hermeticModules{std::move(parseTree.v)};
parseTree.v.emplace_back(std::move(hermeticModules.v.front()));
@@ -1563,7 +1564,7 @@ Scope *ModFileReader::Read(SourceName name, std::optional<bool> isIntrinsic,
GetModuleDependences(context_.moduleDependences(), sourceFile->content());
ResolveNames(context_, parseTree, topScope);
context_.foldingContext().set_moduleFileName(wasModuleFileName);
- context_.set_currentHermeticModuleFileScope(nullptr);
+ context_.set_currentHermeticModuleFileScope(previousHermetic);
if (!moduleSymbol) {
// Submodule symbols' storage are owned by their parents' scopes,
// but their names are not in their parents' dictionaries -- we
diff --git a/flang/lib/Semantics/pointer-assignment.cpp b/flang/lib/Semantics/pointer-assignment.cpp
index c17eb0a..0908769 100644
--- a/flang/lib/Semantics/pointer-assignment.cpp
+++ b/flang/lib/Semantics/pointer-assignment.cpp
@@ -59,6 +59,7 @@ public:
PointerAssignmentChecker &set_isBoundsRemapping(bool);
PointerAssignmentChecker &set_isAssumedRank(bool);
PointerAssignmentChecker &set_pointerComponentLHS(const Symbol *);
+ PointerAssignmentChecker &set_isRHSPointerActualArgument(bool);
bool CheckLeftHandSide(const SomeExpr &);
bool Check(const SomeExpr &);
@@ -94,6 +95,7 @@ private:
bool isVolatile_{false};
bool isBoundsRemapping_{false};
bool isAssumedRank_{false};
+ bool isRHSPointerActualArgument_{false};
const Symbol *pointerComponentLHS_{nullptr};
};
@@ -133,6 +135,12 @@ PointerAssignmentChecker &PointerAssignmentChecker::set_pointerComponentLHS(
return *this;
}
+PointerAssignmentChecker &
+PointerAssignmentChecker::set_isRHSPointerActualArgument(bool isPointerActual) {
+ isRHSPointerActualArgument_ = isPointerActual;
+ return *this;
+}
+
bool PointerAssignmentChecker::CharacterizeProcedure() {
if (!characterizedProcedure_) {
characterizedProcedure_ = true;
@@ -221,6 +229,9 @@ bool PointerAssignmentChecker::Check(const SomeExpr &rhs) {
Say("CONTIGUOUS pointer may not be associated with a discontiguous target"_err_en_US);
return false;
}
+ } else if (isRHSPointerActualArgument_) {
+ Say("CONTIGUOUS pointer dummy argument may not be associated with non-CONTIGUOUS pointer actual argument"_err_en_US);
+ return false;
} else {
Warn(common::UsageWarning::PointerToPossibleNoncontiguous,
"Target of CONTIGUOUS pointer association is not known to be contiguous"_warn_en_US);
@@ -590,12 +601,14 @@ bool CheckStructConstructorPointerComponent(SemanticsContext &context,
bool CheckPointerAssignment(SemanticsContext &context, parser::CharBlock source,
const std::string &description, const DummyDataObject &lhs,
- const SomeExpr &rhs, const Scope &scope, bool isAssumedRank) {
+ const SomeExpr &rhs, const Scope &scope, bool isAssumedRank,
+ bool isPointerActualArgument) {
return PointerAssignmentChecker{context, scope, source, description}
.set_lhsType(common::Clone(lhs.type))
.set_isContiguous(lhs.attrs.test(DummyDataObject::Attr::Contiguous))
.set_isVolatile(lhs.attrs.test(DummyDataObject::Attr::Volatile))
.set_isAssumedRank(isAssumedRank)
+ .set_isRHSPointerActualArgument(isPointerActualArgument)
.Check(rhs);
}
diff --git a/flang/lib/Semantics/pointer-assignment.h b/flang/lib/Semantics/pointer-assignment.h
index 269d641..ad7c655 100644
--- a/flang/lib/Semantics/pointer-assignment.h
+++ b/flang/lib/Semantics/pointer-assignment.h
@@ -31,7 +31,7 @@ bool CheckPointerAssignment(SemanticsContext &, const SomeExpr &lhs,
bool CheckPointerAssignment(SemanticsContext &, parser::CharBlock source,
const std::string &description,
const evaluate::characteristics::DummyDataObject &, const SomeExpr &rhs,
- const Scope &, bool isAssumedRank);
+ const Scope &, bool isAssumedRank, bool IsPointerActualArgument);
bool CheckStructConstructorPointerComponent(
SemanticsContext &, const Symbol &lhs, const SomeExpr &rhs, const Scope &);
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index b297969..bdafc03 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -6350,6 +6350,10 @@ void DeclarationVisitor::Post(const parser::ProcDecl &x) {
if (!dtDetails) {
attrs.set(Attr::EXTERNAL);
}
+ if (derivedTypeInfo_.privateComps &&
+ !attrs.HasAny({Attr::PUBLIC, Attr::PRIVATE})) {
+ attrs.set(Attr::PRIVATE);
+ }
Symbol &symbol{DeclareProcEntity(name, attrs, procInterface)};
SetCUDADataAttr(name.source, symbol, cudaDataAttr()); // for error
symbol.ReplaceName(name.source);
diff --git a/flang/lib/Semantics/tools.cpp b/flang/lib/Semantics/tools.cpp
index 08d2605..1d1e3ac 100644
--- a/flang/lib/Semantics/tools.cpp
+++ b/flang/lib/Semantics/tools.cpp
@@ -1076,7 +1076,7 @@ std::optional<parser::MessageFormattedText> CheckAccessibleSymbol(
return std::nullopt;
} else {
return parser::MessageFormattedText{
- "PRIVATE name '%s' is only accessible within module '%s'"_err_en_US,
+ "PRIVATE name '%s' is accessible only within module '%s'"_err_en_US,
symbol.name(),
DEREF(FindModuleContaining(symbol.owner())).GetName().value()};
}
diff --git a/flang/test/Lower/CUDA/cuda-managed.cuf b/flang/test/Lower/CUDA/cuda-managed.cuf
new file mode 100644
index 0000000..e14bd84
--- /dev/null
+++ b/flang/test/Lower/CUDA/cuda-managed.cuf
@@ -0,0 +1,27 @@
+! RUN: bbc -emit-hlfir -fcuda %s -o - | FileCheck %s
+
+subroutine testr2(N1,N2)
+ real(4), managed :: ai4(N1,N2)
+ real(4), allocatable :: bRefi4(:)
+
+ integer :: i1, i2
+
+ do i2 = 1, N2
+ do i1 = 1, N1
+ ai4(i1,i2) = i1 + N1*(i2-1)
+ enddo
+ enddo
+
+ allocate(bRefi4 (N1))
+ do i1 = 1, N1
+ bRefi4(i1) = (ai4(i1,1)+ai4(i1,N2))*N2/2
+ enddo
+ deallocate(bRefi4)
+
+end subroutine
+
+!CHECK-LABEL: func.func @_QPtestr2
+!CHECK: %[[ALLOC:.*]] = cuf.alloc !fir.array<?x?xf32>, %{{.*}}, %{{.*}} : index, index {bindc_name = "ai4", data_attr = #cuf.cuda<managed>, uniq_name = "_QFtestr2Eai4"} -> !fir.ref<!fir.array<?x?xf32>>
+!CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOC]](%{{.*}}) {data_attr = #cuf.cuda<managed>, uniq_name = "_QFtestr2Eai4"} : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> (!fir.box<!fir.array<?x?xf32>>, !fir.ref<!fir.array<?x?xf32>>)
+!CHECK: %[[DEST:.*]] = hlfir.designate %[[DECLARE]]#0 (%{{.*}}, %{{.*}}) : (!fir.box<!fir.array<?x?xf32>>, i64, i64) -> !fir.ref<f32>
+!CHECK: cuf.data_transfer %{{.*}}#0 to %[[DEST]] {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<f32>, !fir.ref<f32>
diff --git a/flang/test/Lower/HLFIR/call-postponed-associate.f90 b/flang/test/Lower/HLFIR/call-postponed-associate.f90
new file mode 100644
index 0000000..18df62b
--- /dev/null
+++ b/flang/test/Lower/HLFIR/call-postponed-associate.f90
@@ -0,0 +1,85 @@
+! RUN: bbc -emit-hlfir -o - %s -I nowhere | FileCheck %s
+
+subroutine test1
+ interface
+ function array_func1(x)
+ real:: x, array_func1(10)
+ end function array_func1
+ end interface
+ real :: x(10)
+ x = array_func1(1.0)
+end subroutine test1
+! CHECK-LABEL: func.func @_QPtest1() {
+! CHECK: %[[VAL_5:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: %[[VAL_6:.*]]:3 = hlfir.associate %[[VAL_5]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
+! CHECK: %[[VAL_17:.*]] = hlfir.eval_in_mem shape %{{.*}} : (!fir.shape<1>) -> !hlfir.expr<10xf32> {
+! CHECK: fir.call @_QParray_func1
+! CHECK: fir.save_result
+! CHECK: }
+! CHECK: hlfir.assign %[[VAL_17]] to %{{.*}} : !hlfir.expr<10xf32>, !fir.ref<!fir.array<10xf32>>
+! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<f32>, i1
+
+subroutine test2(x)
+ interface
+ function array_func2(x,y)
+ real:: x(*), array_func2(10), y
+ end function array_func2
+ end interface
+ real :: x(:)
+ x = array_func2(x, 1.0)
+end subroutine test2
+! CHECK-LABEL: func.func @_QPtest2(
+! CHECK: %[[VAL_3:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.copy_in %{{.*}} to %{{.*}} : (!fir.box<!fir.array<?xf32>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> (!fir.box<!fir.array<?xf32>>, i1)
+! CHECK: %[[VAL_5:.*]] = fir.box_addr %[[VAL_4]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
+! CHECK: %[[VAL_6:.*]]:3 = hlfir.associate %[[VAL_3]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
+! CHECK: %[[VAL_17:.*]] = hlfir.eval_in_mem shape %{{.*}} : (!fir.shape<1>) -> !hlfir.expr<10xf32> {
+! CHECK: ^bb0(%[[VAL_18:.*]]: !fir.ref<!fir.array<10xf32>>):
+! CHECK: %[[VAL_19:.*]] = fir.call @_QParray_func2(%[[VAL_5]], %[[VAL_6]]#0) fastmath<contract> : (!fir.ref<!fir.array<?xf32>>, !fir.ref<f32>) -> !fir.array<10xf32>
+! CHECK: fir.save_result %[[VAL_19]] to %[[VAL_18]](%{{.*}}) : !fir.array<10xf32>, !fir.ref<!fir.array<10xf32>>, !fir.shape<1>
+! CHECK: }
+! CHECK: hlfir.copy_out %{{.*}}, %[[VAL_4]]#1 to %{{.*}} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, i1, !fir.box<!fir.array<?xf32>>) -> ()
+! CHECK: hlfir.assign %[[VAL_17]] to %{{.*}} : !hlfir.expr<10xf32>, !fir.box<!fir.array<?xf32>>
+! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<f32>, i1
+! CHECK: hlfir.destroy %[[VAL_17]] : !hlfir.expr<10xf32>
+
+subroutine test3(x)
+ interface
+ function array_func3(x)
+ real :: x, array_func3(10)
+ end function array_func3
+ end interface
+ logical :: x
+ if (any(array_func3(1.0).le.array_func3(2.0))) x = .true.
+end subroutine test3
+! CHECK-LABEL: func.func @_QPtest3(
+! CHECK: %[[VAL_2:.*]] = arith.constant 1.000000e+00 : f32
+! CHECK: %[[VAL_3:.*]]:3 = hlfir.associate %[[VAL_2]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
+! CHECK: %[[VAL_14:.*]] = hlfir.eval_in_mem shape %{{.*}} : (!fir.shape<1>) -> !hlfir.expr<10xf32> {
+! CHECK: ^bb0(%[[VAL_15:.*]]: !fir.ref<!fir.array<10xf32>>):
+! CHECK: %[[VAL_16:.*]] = fir.call @_QParray_func3(%[[VAL_3]]#0) fastmath<contract> : (!fir.ref<f32>) -> !fir.array<10xf32>
+! CHECK: fir.save_result %[[VAL_16]] to %[[VAL_15]](%{{.*}}) : !fir.array<10xf32>, !fir.ref<!fir.array<10xf32>>, !fir.shape<1>
+! CHECK: }
+! CHECK: %[[VAL_17:.*]] = arith.constant 2.000000e+00 : f32
+! CHECK: %[[VAL_18:.*]]:3 = hlfir.associate %[[VAL_17]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
+! CHECK: %[[VAL_29:.*]] = hlfir.eval_in_mem shape %{{.*}} : (!fir.shape<1>) -> !hlfir.expr<10xf32> {
+! CHECK: ^bb0(%[[VAL_30:.*]]: !fir.ref<!fir.array<10xf32>>):
+! CHECK: %[[VAL_31:.*]] = fir.call @_QParray_func3(%[[VAL_18]]#0) fastmath<contract> : (!fir.ref<f32>) -> !fir.array<10xf32>
+! CHECK: fir.save_result %[[VAL_31]] to %[[VAL_30]](%{{.*}}) : !fir.array<10xf32>, !fir.ref<!fir.array<10xf32>>, !fir.shape<1>
+! CHECK: }
+! CHECK: %[[VAL_32:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<?x!fir.logical<4>> {
+! CHECK: ^bb0(%[[VAL_33:.*]]: index):
+! CHECK: %[[VAL_34:.*]] = hlfir.apply %[[VAL_14]], %[[VAL_33]] : (!hlfir.expr<10xf32>, index) -> f32
+! CHECK: %[[VAL_35:.*]] = hlfir.apply %[[VAL_29]], %[[VAL_33]] : (!hlfir.expr<10xf32>, index) -> f32
+! CHECK: %[[VAL_36:.*]] = arith.cmpf ole, %[[VAL_34]], %[[VAL_35]] fastmath<contract> : f32
+! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (i1) -> !fir.logical<4>
+! CHECK: hlfir.yield_element %[[VAL_37]] : !fir.logical<4>
+! CHECK: }
+! CHECK: %[[VAL_38:.*]] = hlfir.any %[[VAL_32]] : (!hlfir.expr<?x!fir.logical<4>>) -> !fir.logical<4>
+! CHECK: hlfir.destroy %[[VAL_32]] : !hlfir.expr<?x!fir.logical<4>>
+! CHECK: hlfir.end_associate %[[VAL_18]]#1, %[[VAL_18]]#2 : !fir.ref<f32>, i1
+! CHECK: hlfir.destroy %[[VAL_29]] : !hlfir.expr<10xf32>
+! CHECK: hlfir.end_associate %[[VAL_3]]#1, %[[VAL_3]]#2 : !fir.ref<f32>, i1
+! CHECK: hlfir.destroy %[[VAL_14]] : !hlfir.expr<10xf32>
+! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_38]] : (!fir.logical<4>) -> i1
+! CHECK: fir.if %[[VAL_39]] {
diff --git a/flang/test/Lower/HLFIR/entry_return.f90 b/flang/test/Lower/HLFIR/entry_return.f90
index 5d3e160..18fb2b57 100644
--- a/flang/test/Lower/HLFIR/entry_return.f90
+++ b/flang/test/Lower/HLFIR/entry_return.f90
@@ -51,13 +51,13 @@ end function
! CHECK: %[[VAL_6:.*]]:3 = hlfir.associate %[[VAL_4]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
! CHECK: %[[VAL_7:.*]]:3 = hlfir.associate %[[VAL_5]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
! CHECK: %[[VAL_8:.*]] = fir.call @_QPcomplex(%[[VAL_6]]#0, %[[VAL_7]]#0) fastmath<contract> : (!fir.ref<f32>, !fir.ref<f32>) -> f32
-! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<f32>, i1
-! CHECK: hlfir.end_associate %[[VAL_7]]#1, %[[VAL_7]]#2 : !fir.ref<f32>, i1
! CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: %[[VAL_10:.*]] = fir.undefined complex<f32>
! CHECK: %[[VAL_11:.*]] = fir.insert_value %[[VAL_10]], %[[VAL_8]], [0 : index] : (complex<f32>, f32) -> complex<f32>
! CHECK: %[[VAL_12:.*]] = fir.insert_value %[[VAL_11]], %[[VAL_9]], [1 : index] : (complex<f32>, f32) -> complex<f32>
! CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_1]]#0 : complex<f32>, !fir.ref<complex<f32>>
+! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<f32>, i1
+! CHECK: hlfir.end_associate %[[VAL_7]]#1, %[[VAL_7]]#2 : !fir.ref<f32>, i1
! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref<!fir.logical<4>>
! CHECK: return %[[VAL_13]] : !fir.logical<4>
! CHECK: }
@@ -74,13 +74,13 @@ end function
! CHECK: %[[VAL_6:.*]]:3 = hlfir.associate %[[VAL_4]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
! CHECK: %[[VAL_7:.*]]:3 = hlfir.associate %[[VAL_5]] {adapt.valuebyref} : (f32) -> (!fir.ref<f32>, !fir.ref<f32>, i1)
! CHECK: %[[VAL_8:.*]] = fir.call @_QPcomplex(%[[VAL_6]]#0, %[[VAL_7]]#0) fastmath<contract> : (!fir.ref<f32>, !fir.ref<f32>) -> f32
-! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<f32>, i1
-! CHECK: hlfir.end_associate %[[VAL_7]]#1, %[[VAL_7]]#2 : !fir.ref<f32>, i1
! CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: %[[VAL_10:.*]] = fir.undefined complex<f32>
! CHECK: %[[VAL_11:.*]] = fir.insert_value %[[VAL_10]], %[[VAL_8]], [0 : index] : (complex<f32>, f32) -> complex<f32>
! CHECK: %[[VAL_12:.*]] = fir.insert_value %[[VAL_11]], %[[VAL_9]], [1 : index] : (complex<f32>, f32) -> complex<f32>
! CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_1]]#0 : complex<f32>, !fir.ref<complex<f32>>
+! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<f32>, i1
+! CHECK: hlfir.end_associate %[[VAL_7]]#1, %[[VAL_7]]#2 : !fir.ref<f32>, i1
! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref<complex<f32>>
! CHECK: return %[[VAL_13]] : complex<f32>
! CHECK: }
diff --git a/flang/test/Lower/HLFIR/proc-pointer-comp-nopass.f90 b/flang/test/Lower/HLFIR/proc-pointer-comp-nopass.f90
index 28659a3..206b6e4 100644
--- a/flang/test/Lower/HLFIR/proc-pointer-comp-nopass.f90
+++ b/flang/test/Lower/HLFIR/proc-pointer-comp-nopass.f90
@@ -32,8 +32,8 @@ end function
! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_6]] : !fir.ref<!fir.boxproc<(!fir.ref<f32>) -> f32>>
! CHECK: %[[VAL_8:.*]] = fir.box_addr %[[VAL_7]] : (!fir.boxproc<(!fir.ref<f32>) -> f32>) -> ((!fir.ref<f32>) -> f32)
! CHECK: %[[VAL_9:.*]] = fir.call %[[VAL_8]](%[[VAL_5]]#0) fastmath<contract> : (!fir.ref<f32>) -> f32
-! CHECK: hlfir.end_associate %[[VAL_5]]#1, %[[VAL_5]]#2 : !fir.ref<f32>, i1
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_2]]#0 : f32, !fir.ref<f32>
+! CHECK: hlfir.end_associate %[[VAL_5]]#1, %[[VAL_5]]#2 : !fir.ref<f32>, i1
subroutine test2(x)
use proc_comp_defs, only : t, iface
diff --git a/flang/test/Lower/OpenACC/acc-atomic-capture.f90 b/flang/test/Lower/OpenACC/acc-atomic-capture.f90
index 8205990..ee38ab6 100644
--- a/flang/test/Lower/OpenACC/acc-atomic-capture.f90
+++ b/flang/test/Lower/OpenACC/acc-atomic-capture.f90
@@ -306,3 +306,60 @@ end subroutine comp_ref_in_atomic_capture2
! CHECK: }
! CHECK: acc.atomic.read %[[V_DECL]]#0 = %[[C]] : !fir.ref<i32>, !fir.ref<i32>, i32
! CHECK: }
+
+! CHECK-LABEL: func.func @_QPatomic_capture_with_associate() {
+subroutine atomic_capture_with_associate
+ interface
+ integer function func(x)
+ integer :: x
+ end function func
+ end interface
+! CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "_QFatomic_capture_with_associateEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "_QFatomic_capture_with_associateEy"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "_QFatomic_capture_with_associateEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+ integer :: x, y, z
+
+! CHECK: %[[VAL_10:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_11:.*]] = fir.call @_QPfunc(%[[VAL_10]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+! CHECK: acc.atomic.capture {
+! CHECK: acc.atomic.read %[[X_DECL]]#0 = %[[Y_DECL]]#0 : !fir.ref<i32>, !fir.ref<i32>, i32
+! CHECK: acc.atomic.write %[[Y_DECL]]#0 = %[[VAL_11]] : !fir.ref<i32>, i32
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_10]]#1, %[[VAL_10]]#2 : !fir.ref<i32>, i1
+ !$acc atomic capture
+ x = y
+ y = func(z + 1)
+ !$acc end atomic
+
+! CHECK: %[[VAL_15:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_16:.*]] = fir.call @_QPfunc(%[[VAL_15]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+! CHECK: acc.atomic.capture {
+! CHECK: acc.atomic.update %[[Y_DECL]]#0 : !fir.ref<i32> {
+! CHECK: ^bb0(%[[VAL_17:.*]]: i32):
+! CHECK: %[[VAL_18:.*]] = arith.muli %[[VAL_16]], %[[VAL_17]] : i32
+! CHECK: acc.yield %[[VAL_18]] : i32
+! CHECK: }
+! CHECK: acc.atomic.read %[[X_DECL]]#0 = %[[Y_DECL]]#0 : !fir.ref<i32>, !fir.ref<i32>, i32
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_15]]#1, %[[VAL_15]]#2 : !fir.ref<i32>, i1
+ !$acc atomic capture
+ y = func(z + 1) * y
+ x = y
+ !$acc end atomic
+
+! CHECK: %[[VAL_22:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_23:.*]] = fir.call @_QPfunc(%[[VAL_22]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+! CHECK: acc.atomic.capture {
+! CHECK: acc.atomic.read %[[X_DECL]]#0 = %[[Y_DECL]]#0 : !fir.ref<i32>, !fir.ref<i32>, i32
+! CHECK: acc.atomic.update %[[Y_DECL]]#0 : !fir.ref<i32> {
+! CHECK: ^bb0(%[[VAL_24:.*]]: i32):
+! CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_23]], %[[VAL_24]] : i32
+! CHECK: acc.yield %[[VAL_25]] : i32
+! CHECK: }
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_22]]#1, %[[VAL_22]]#2 : !fir.ref<i32>, i1
+ !$acc atomic capture
+ x = y
+ y = func(z + 1) + y
+ !$acc end atomic
+end subroutine atomic_capture_with_associate
diff --git a/flang/test/Lower/OpenACC/acc-atomic-update.f90 b/flang/test/Lower/OpenACC/acc-atomic-update.f90
index da29728..71aa69f 100644
--- a/flang/test/Lower/OpenACC/acc-atomic-update.f90
+++ b/flang/test/Lower/OpenACC/acc-atomic-update.f90
@@ -3,6 +3,11 @@
! RUN: %flang_fc1 -fopenacc -emit-hlfir %s -o - | FileCheck %s
program acc_atomic_update_test
+ interface
+ integer function func(x)
+ integer :: x
+ end function func
+ end interface
integer :: x, y, z
integer, pointer :: a, b
integer, target :: c, d
@@ -67,7 +72,18 @@ program acc_atomic_update_test
!$acc atomic
i1 = i1 + 1
!$acc end atomic
+
+!CHECK: %[[VAL_44:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+!CHECK: %[[VAL_45:.*]] = fir.call @_QPfunc(%[[VAL_44]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+!CHECK: acc.atomic.update %[[X_DECL]]#0 : !fir.ref<i32> {
+!CHECK: ^bb0(%[[VAL_46:.*]]: i32):
+!CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_46]], %[[VAL_45]] : i32
+!CHECK: acc.yield %[[VAL_47]] : i32
+!CHECK: }
+!CHECK: hlfir.end_associate %[[VAL_44]]#1, %[[VAL_44]]#2 : !fir.ref<i32>, i1
+ !$acc atomic update
+ x = x + func(z + 1)
+ !$acc end atomic
!CHECK: return
!CHECK: }
end program acc_atomic_update_test
-
diff --git a/flang/test/Lower/OpenMP/Todo/defaultmap-clause-firstprivate.f90 b/flang/test/Lower/OpenMP/Todo/defaultmap-clause-firstprivate.f90
new file mode 100644
index 0000000..0af2c7f
--- /dev/null
+++ b/flang/test/Lower/OpenMP/Todo/defaultmap-clause-firstprivate.f90
@@ -0,0 +1,11 @@
+!RUN: %not_todo_cmd bbc -emit-hlfir -fopenmp -fopenmp-version=51 -o - %s 2>&1 | FileCheck %s
+!RUN: %not_todo_cmd %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=51 -o - %s 2>&1 | FileCheck %s
+
+subroutine f00
+ implicit none
+ integer :: i
+ !CHECK: not yet implemented: Firstprivate and None are currently unsupported defaultmap behaviour
+ !$omp target defaultmap(firstprivate)
+ i = 10
+ !$omp end target
+ end
diff --git a/flang/test/Lower/OpenMP/Todo/defaultmap-clause-none.f90 b/flang/test/Lower/OpenMP/Todo/defaultmap-clause-none.f90
new file mode 100644
index 0000000..287eb4a
--- /dev/null
+++ b/flang/test/Lower/OpenMP/Todo/defaultmap-clause-none.f90
@@ -0,0 +1,11 @@
+!RUN: %not_todo_cmd bbc -emit-hlfir -fopenmp -fopenmp-version=51 -o - %s 2>&1 | FileCheck %s
+!RUN: %not_todo_cmd %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=51 -o - %s 2>&1 | FileCheck %s
+
+subroutine f00
+ implicit none
+ integer :: i
+ !CHECK: not yet implemented: Firstprivate and None are currently unsupported defaultmap behaviour
+ !$omp target defaultmap(none)
+ i = 10
+ !$omp end target
+end
diff --git a/flang/test/Lower/OpenMP/Todo/defaultmap-clause.f90 b/flang/test/Lower/OpenMP/Todo/defaultmap-clause.f90
deleted file mode 100644
index 062399d..0000000
--- a/flang/test/Lower/OpenMP/Todo/defaultmap-clause.f90
+++ /dev/null
@@ -1,8 +0,0 @@
-!RUN: %not_todo_cmd bbc -emit-hlfir -fopenmp -fopenmp-version=45 -o - %s 2>&1 | FileCheck %s
-!RUN: %not_todo_cmd %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=45 -o - %s 2>&1 | FileCheck %s
-
-!CHECK: not yet implemented: DEFAULTMAP clause is not implemented yet
-subroutine f00
- !$omp target defaultmap(tofrom:scalar)
- !$omp end target
-end
diff --git a/flang/test/Lower/OpenMP/atomic-capture.f90 b/flang/test/Lower/OpenMP/atomic-capture.f90
index bbb0822..2f800d5 100644
--- a/flang/test/Lower/OpenMP/atomic-capture.f90
+++ b/flang/test/Lower/OpenMP/atomic-capture.f90
@@ -97,3 +97,59 @@ subroutine pointers_in_atomic_capture()
b = a
!$omp end atomic
end subroutine
+
+! Check that the clean-ups associated with the function call
+! are generated after the omp.atomic.capture operation:
+! CHECK-LABEL: func.func @_QPfunc_call_cleanup(
+subroutine func_call_cleanup(x, v, vv)
+ interface
+ integer function func(x)
+ integer :: x
+ end function func
+ end interface
+ integer :: x, v, vv
+
+! CHECK: %[[VAL_7:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_8:.*]] = fir.call @_QPfunc(%[[VAL_7]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+! CHECK: omp.atomic.capture {
+! CHECK: omp.atomic.read %[[VAL_1:.*]]#0 = %[[VAL_3:.*]]#0 : !fir.ref<i32>, !fir.ref<i32>, i32
+! CHECK: omp.atomic.write %[[VAL_3]]#0 = %[[VAL_8]] : !fir.ref<i32>, i32
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_7]]#1, %[[VAL_7]]#2 : !fir.ref<i32>, i1
+ !$omp atomic capture
+ v = x
+ x = func(vv + 1)
+ !$omp end atomic
+
+! CHECK: %[[VAL_12:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_13:.*]] = fir.call @_QPfunc(%[[VAL_12]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+! CHECK: omp.atomic.capture {
+! CHECK: omp.atomic.read %[[VAL_1]]#0 = %[[VAL_3]]#0 : !fir.ref<i32>, !fir.ref<i32>, i32
+! CHECK: omp.atomic.update %[[VAL_3]]#0 : !fir.ref<i32> {
+! CHECK: ^bb0(%[[VAL_14:.*]]: i32):
+! CHECK: %[[VAL_15:.*]] = arith.addi %[[VAL_13]], %[[VAL_14]] : i32
+! CHECK: omp.yield(%[[VAL_15]] : i32)
+! CHECK: }
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_12]]#1, %[[VAL_12]]#2 : !fir.ref<i32>, i1
+ !$omp atomic capture
+ v = x
+ x = func(vv + 1) + x
+ !$omp end atomic
+
+! CHECK: %[[VAL_19:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_20:.*]] = fir.call @_QPfunc(%[[VAL_19]]#0) fastmath<contract> : (!fir.ref<i32>) -> i32
+! CHECK: omp.atomic.capture {
+! CHECK: omp.atomic.update %[[VAL_3]]#0 : !fir.ref<i32> {
+! CHECK: ^bb0(%[[VAL_21:.*]]: i32):
+! CHECK: %[[VAL_22:.*]] = arith.addi %[[VAL_20]], %[[VAL_21]] : i32
+! CHECK: omp.yield(%[[VAL_22]] : i32)
+! CHECK: }
+! CHECK: omp.atomic.read %[[VAL_1]]#0 = %[[VAL_3]]#0 : !fir.ref<i32>, !fir.ref<i32>, i32
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_19]]#1, %[[VAL_19]]#2 : !fir.ref<i32>, i1
+ !$omp atomic capture
+ x = func(vv + 1) + x
+ v = x
+ !$omp end atomic
+end subroutine func_call_cleanup
diff --git a/flang/test/Lower/OpenMP/atomic-update.f90 b/flang/test/Lower/OpenMP/atomic-update.f90
index 257ae8f..3f840ac 100644
--- a/flang/test/Lower/OpenMP/atomic-update.f90
+++ b/flang/test/Lower/OpenMP/atomic-update.f90
@@ -219,3 +219,24 @@ program OmpAtomicUpdate
!$omp atomic update
w = w + g
end program OmpAtomicUpdate
+
+! Check that the clean-ups associated with the function call
+! are generated after the omp.atomic.update operation:
+! CHECK-LABEL: func.func @_QPfunc_call_cleanup(
+subroutine func_call_cleanup(v, vv)
+ integer v, vv
+
+! CHECK: %[[VAL_6:.*]]:3 = hlfir.associate %{{.*}} {adapt.valuebyref} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: %[[VAL_7:.*]] = fir.call @_QPfunc(%[[VAL_6]]#0) fastmath<contract> : (!fir.ref<i32>) -> f32
+! CHECK: omp.atomic.update %{{.*}} : !fir.ref<i32> {
+! CHECK: ^bb0(%[[VAL_8:.*]]: i32):
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i32) -> f32
+! CHECK: %[[VAL_10:.*]] = arith.addf %[[VAL_9]], %[[VAL_7]] fastmath<contract> : f32
+! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_10]] : (f32) -> i32
+! CHECK: omp.yield(%[[VAL_11]] : i32)
+! CHECK: }
+! CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref<i32>, i1
+ !$omp atomic update
+ v = v + func(vv + 1)
+ !$omp end atomic
+end subroutine func_call_cleanup
diff --git a/flang/test/Lower/OpenMP/defaultmap.f90 b/flang/test/Lower/OpenMP/defaultmap.f90
new file mode 100644
index 0000000..89d86ac
--- /dev/null
+++ b/flang/test/Lower/OpenMP/defaultmap.f90
@@ -0,0 +1,105 @@
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=52 %s -o - | FileCheck %s
+
+subroutine defaultmap_allocatable_present()
+ implicit none
+ integer, dimension(:), allocatable :: arr
+
+! CHECK: %[[MAP_1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, i32) map_clauses(implicit, present, exit_release_or_enter_alloc) capture(ByRef) var_ptr_ptr({{.*}}) bounds({{.*}}) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>> {name = ""}
+! CHECK: %[[MAP_2:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.box<!fir.heap<!fir.array<?xi32>>>) map_clauses(implicit, to) capture(ByRef) members({{.*}}) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {name = "arr"}
+!$omp target defaultmap(present: allocatable)
+ arr(1) = 10
+!$omp end target
+
+ return
+end subroutine
+
+subroutine defaultmap_scalar_tofrom()
+ implicit none
+ integer :: scalar_int
+
+! CHECK: %[[MAP:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<i32>, i32) map_clauses(implicit, tofrom) capture(ByRef) -> !fir.ref<i32> {name = "scalar_int"}
+ !$omp target defaultmap(tofrom: scalar)
+ scalar_int = 20
+ !$omp end target
+
+ return
+end subroutine
+
+subroutine defaultmap_all_default()
+ implicit none
+ integer, dimension(:), allocatable :: arr
+ integer :: aggregate(16)
+ integer :: scalar_int
+
+! CHECK: %[[MAP_1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<i32>, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = "scalar_int"}
+! CHECK: %[[MAP_2:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, i32) map_clauses(implicit, tofrom) capture(ByRef) var_ptr_ptr({{.*}}) bounds({{.*}}) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>> {name = ""}
+! CHECK: %[[MAP_3:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.box<!fir.heap<!fir.array<?xi32>>>) map_clauses(implicit, to) capture(ByRef) members({{.*}}) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {name = "arr"}
+! CHECK: %[[MAP_4:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.array<16xi32>>, !fir.array<16xi32>) map_clauses(implicit, tofrom) capture(ByRef) bounds({{.*}}) -> !fir.ref<!fir.array<16xi32>> {name = "aggregate"}
+
+ !$omp target defaultmap(default: all)
+ scalar_int = 20
+ arr(1) = scalar_int + aggregate(1)
+ !$omp end target
+
+ return
+end subroutine
+
+subroutine defaultmap_pointer_to()
+ implicit none
+ integer, dimension(:), pointer :: arr_ptr(:)
+ integer :: scalar_int
+
+! CHECK: %[[MAP_1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, i32) map_clauses(implicit, to) capture(ByRef) var_ptr_ptr({{.*}}) bounds({{.*}}) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>> {name = ""}
+! CHECK: %[[MAP_2:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.box<!fir.ptr<!fir.array<?xi32>>>) map_clauses(implicit, to) capture(ByRef) members({{.*}}) -> !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>> {name = "arr_ptr"}
+! CHECK: %[[MAP_3:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<i32>, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = "scalar_int"}
+ !$omp target defaultmap(to: pointer)
+ arr_ptr(1) = scalar_int + 20
+ !$omp end target
+
+ return
+end subroutine
+
+subroutine defaultmap_scalar_from()
+ implicit none
+ integer :: scalar_test
+
+! CHECK:%[[MAP:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<i32>, i32) map_clauses(implicit, from) capture(ByRef) -> !fir.ref<i32> {name = "scalar_test"}
+ !$omp target defaultmap(from: scalar)
+ scalar_test = 20
+ !$omp end target
+
+ return
+end subroutine
+
+subroutine defaultmap_aggregate_to()
+ implicit none
+ integer :: aggregate_arr(16)
+ integer :: scalar_test
+
+! CHECK: %[[MAP_1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<i32>, i32) map_clauses(tofrom) capture(ByRef) -> !fir.ref<i32> {name = "scalar_test"}
+! CHECK: %[[MAP_2:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.array<16xi32>>, !fir.array<16xi32>) map_clauses(implicit, to) capture(ByRef) bounds({{.*}}) -> !fir.ref<!fir.array<16xi32>> {name = "aggregate_arr"}
+ !$omp target map(tofrom: scalar_test) defaultmap(to: aggregate)
+ aggregate_arr(1) = 1
+ scalar_test = 1
+ !$omp end target
+
+ return
+end subroutine
+
+subroutine defaultmap_dtype_aggregate_to()
+ implicit none
+ type :: dtype
+ integer(4) :: array_i(10)
+ integer(4) :: k
+ end type dtype
+
+ type(dtype) :: aggregate_type
+
+! CHECK: %[[MAP:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref<!fir.type<_QFdefaultmap_dtype_aggregate_toTdtype{array_i:!fir.array<10xi32>,k:i32}>>, !fir.type<_QFdefaultmap_dtype_aggregate_toTdtype{array_i:!fir.array<10xi32>,k:i32}>) map_clauses(implicit, to) capture(ByRef) -> !fir.ref<!fir.type<_QFdefaultmap_dtype_aggregate_toTdtype{array_i:!fir.array<10xi32>,k:i32}>> {name = "aggregate_type"}
+ !$omp target defaultmap(to: aggregate)
+ aggregate_type%k = 40
+ aggregate_type%array_i(1) = 50
+ !$omp end target
+
+ return
+end subroutine
diff --git a/flang/test/Parser/OpenMP/bug518.f b/flang/test/Parser/OpenMP/bug518.f
index 2dbacef..2739de6 100644
--- a/flang/test/Parser/OpenMP/bug518.f
+++ b/flang/test/Parser/OpenMP/bug518.f
@@ -9,9 +9,9 @@
!$omp end parallel
end
-!CHECK-E:{{^}}!$ thread = OMP_GET_MAX_THREADS()
+!CHECK-E:{{^}}!$ thread = OMP_GET_MAX_THREADS()
!CHECK-E:{{^}}!$omp parallel private(ia)
-!CHECK-E:{{^}}!$ continue
+!CHECK-E:{{^}}!$ continue
!CHECK-E:{{^}}!$omp end parallel
!CHECK-OMP:thread=omp_get_max_threads()
diff --git a/flang/test/Parser/OpenMP/compiler-directive-continuation.f90 b/flang/test/Parser/OpenMP/compiler-directive-continuation.f90
index 169976d..644ab3f 100644
--- a/flang/test/Parser/OpenMP/compiler-directive-continuation.f90
+++ b/flang/test/Parser/OpenMP/compiler-directive-continuation.f90
@@ -7,10 +7,10 @@
! CHECK-LABEL: subroutine mixed_form1()
! CHECK-E:{{^}} i = 1 &
! CHECK-E:{{^}}!$ +100&
-! CHECK-E:{{^}}!$ &+ 1000&
-! CHECK-E:{{^}} &+ 10 + 1&
-! CHECK-E:{{^}}!$ & +100000&
-! CHECK-E:{{^}} &0000 + 1000000
+! CHECK-E:{{^}}!$ &+ 1000&
+! CHECK-E:{{^}} &+ 10 + 1&
+! CHECK-E:{{^}}!$ & +100000&
+! CHECK-E:{{^}} &0000 + 1000000
! CHECK-OMP: i=1001001112_4
! CHECK-NO-OMP: i=1010011_4
subroutine mixed_form1()
@@ -39,8 +39,8 @@ end subroutine
! CHECK-LABEL: subroutine mixed_form3()
! CHECK-E:{{^}}!$ i=0
! CHECK-E:{{^}}!$ i = 1 &
-! CHECK-E:{{^}}!$ & +10 &
-! CHECK-E:{{^}}!$ &+100&
+! CHECK-E:{{^}}!$ & +10 &
+! CHECK-E:{{^}}!$ &+100&
! CHECK-E:{{^}}!$ +1000
! CHECK-OMP: i=0_4
! CHECK-OMP: i=1111_4
diff --git a/flang/test/Parser/OpenMP/defaultmap-clause.f90 b/flang/test/Parser/OpenMP/defaultmap-clause.f90
index dc036ae..d908258 100644
--- a/flang/test/Parser/OpenMP/defaultmap-clause.f90
+++ b/flang/test/Parser/OpenMP/defaultmap-clause.f90
@@ -82,3 +82,19 @@ end
!PARSE-TREE: | OmpClauseList -> OmpClause -> Defaultmap -> OmpDefaultmapClause
!PARSE-TREE: | | ImplicitBehavior = Tofrom
!PARSE-TREE: | | Modifier -> OmpVariableCategory -> Value = Scalar
+
+subroutine f05
+ !$omp target defaultmap(present: scalar)
+ !$omp end target
+end
+
+!UNPARSE: SUBROUTINE f05
+!UNPARSE: !$OMP TARGET DEFAULTMAP(PRESENT:SCALAR)
+!UNPARSE: !$OMP END TARGET
+!UNPARSE: END SUBROUTINE
+
+!PARSE-TREE: OmpBeginBlockDirective
+!PARSE-TREE: | OmpBlockDirective -> llvm::omp::Directive = target
+!PARSE-TREE: | OmpClauseList -> OmpClause -> Defaultmap -> OmpDefaultmapClause
+!PARSE-TREE: | | ImplicitBehavior = Present
+!PARSE-TREE: | | Modifier -> OmpVariableCategory -> Value = Scalar
diff --git a/flang/test/Parser/OpenMP/sentinels.f b/flang/test/Parser/OpenMP/sentinels.f
index 299b83e..f5a2fd4 100644
--- a/flang/test/Parser/OpenMP/sentinels.f
+++ b/flang/test/Parser/OpenMP/sentinels.f
@@ -61,12 +61,12 @@ c$ +& , "comment"
! Test valid chars in initial and continuation lines.
! CHECK: !$ 20 PRINT *, "msg2"
-! CHECK: !$ & , "msg3"
+! CHECK: !$ &, "msg3"
c$ 20 PRINT *, "msg2"
c$ & , "msg3"
! CHECK: !$ PRINT *, "msg4",
-! CHECK: !$ & "msg5"
+! CHECK: !$ &"msg5"
c$ 0PRINT *, "msg4",
c$ + "msg5"
end
diff --git a/flang/test/Parser/continuation-in-conditional-compilation.f b/flang/test/Parser/continuation-in-conditional-compilation.f
index 57b69de..ebc6a3f 100644
--- a/flang/test/Parser/continuation-in-conditional-compilation.f
+++ b/flang/test/Parser/continuation-in-conditional-compilation.f
@@ -1,11 +1,12 @@
! RUN: %flang_fc1 -E %s 2>&1 | FileCheck %s
program main
! CHECK: k01=1+
-! CHECK: !$ & 1
+! CHECK: !$ &1
k01=1+
-!$ & 1
+!$ &1
-! CHECK: !$ k02=23
+! CHECK: !$ k02=2
+! CHECK: 3
! CHECK: !$ &4
!$ k02=2
+3
diff --git a/flang/test/Preprocessing/bug136845.F b/flang/test/Preprocessing/bug136845.F
new file mode 100644
index 0000000..ce52c29
--- /dev/null
+++ b/flang/test/Preprocessing/bug136845.F
@@ -0,0 +1,45 @@
+!RUN: %flang_fc1 -E %s | FileCheck --check-prefix=PREPRO %s
+!RUN: %flang_fc1 -fdebug-unparse %s | FileCheck --check-prefix=NORMAL %s
+!RUN: %flang_fc1 -fopenmp -fdebug-unparse %s | FileCheck --check-prefix=OMP %s
+
+c$ !
+
+C$
+ continue
+
+ k=0 w
+ k=0
+c$ 0 x
+c$ 1 y
+c$ 2 k= z
+c$ ! A
+c$ !1 B
+ print *,k
+*$1 continue
+ end
+
+!PREPRO:!$ &
+!PREPRO: continue
+!PREPRO: k=0
+!PREPRO: k=0
+!PREPRO:!$
+!PREPRO:!$ &
+!PREPRO:!$ &k=
+!PREPRO:!$ &
+!PREPRO:!$ &1
+!PREPRO: print *,k
+!PREPRO:!$ 1 continue
+!PREPRO: end
+
+!NORMAL: k=0_4
+!NORMAL: k=0_4
+!NORMAL: PRINT *, k
+!NORMAL:END PROGRAM
+
+!OMP: CONTINUE
+!OMP: k=0_4
+!OMP: k=0_4
+!OMP: k=1_4
+!OMP: PRINT *, k
+!OMP: 1 CONTINUE
+!OMP:END PROGRAM
diff --git a/flang/test/Semantics/PowerPC/ppc-vector-types04.f90 b/flang/test/Semantics/PowerPC/ppc-vector-types04.f90
index 6842233..71e0b06 100644
--- a/flang/test/Semantics/PowerPC/ppc-vector-types04.f90
+++ b/flang/test/Semantics/PowerPC/ppc-vector-types04.f90
@@ -4,7 +4,7 @@
subroutine vec_type_test(arg1, arg2, arg3, arg4)
!ERROR: Assumed-shape entity of vector(real(4)) type is not supported
vector(real) :: arg1(:)
-!ERROR: Assumed Rank entity of vector(unsigned(4)) type is not supported
+!ERROR: Assumed rank entity of vector(unsigned(4)) type is not supported
vector(unsigned) :: arg2(..)
!ERROR: Deferred-shape entity of vector(integer(4)) type is not supported
vector(integer), allocatable :: arg3(:)
@@ -19,7 +19,7 @@ end subroutine vec_type_test
subroutine vec_pair_type_test(arg1, arg2, arg3, arg4)
!ERROR: Assumed-shape entity of __vector_pair type is not supported
__vector_pair :: arg1(:)
-!ERROR: Assumed Rank entity of __vector_pair type is not supported
+!ERROR: Assumed rank entity of __vector_pair type is not supported
__vector_pair :: arg2(..)
!ERROR: Deferred-shape entity of __vector_pair type is not supported
__vector_pair, allocatable :: arg3(:)
@@ -34,7 +34,7 @@ end subroutine vec_pair_type_test
subroutine vec_quad_type_test(arg1, arg2, arg3, arg4)
!ERROR: Assumed-shape entity of __vector_quad type is not supported
__vector_quad :: arg1(:)
-!ERROR: Assumed Rank entity of __vector_quad type is not supported
+!ERROR: Assumed rank entity of __vector_quad type is not supported
__vector_quad :: arg2(..)
!ERROR: Deferred-shape entity of __vector_quad type is not supported
__vector_quad, allocatable :: arg3(:)
diff --git a/flang/test/Semantics/allocate01.f90 b/flang/test/Semantics/allocate01.f90
index a66e246..a10a725 100644
--- a/flang/test/Semantics/allocate01.f90
+++ b/flang/test/Semantics/allocate01.f90
@@ -62,6 +62,7 @@ subroutine C932(ed1, ed5, ed7, edc9, edc10, okad1, okpd1, okacd5)
real, pointer, save :: okp3
real, allocatable, save :: oka3, okac4[:,:]
real, allocatable :: okacd5(:, :)[:]
+ character(:), allocatable :: chvar
!ERROR: Name in ALLOCATE statement must be a variable name
allocate(foo)
@@ -102,6 +103,8 @@ subroutine C932(ed1, ed5, ed7, edc9, edc10, okad1, okpd1, okacd5)
allocate(edc9%nok)
!ERROR: Entity in ALLOCATE statement must have the ALLOCATABLE or POINTER attribute
allocate(edc10)
+ !ERROR: Type-spec in ALLOCATE must not have a deferred type parameter
+ allocate(character(:) :: chvar)
! No errors expected below:
allocate(a_var)
@@ -117,4 +120,5 @@ subroutine C932(ed1, ed5, ed7, edc9, edc10, okad1, okpd1, okacd5)
allocate(edc9%ok(4))
allocate(edc10%ok)
allocate(rp)
+ allocate(character(123) :: chvar)
end subroutine
diff --git a/flang/test/Semantics/atomic02.f90 b/flang/test/Semantics/atomic02.f90
index 484239a..0d10715 100644
--- a/flang/test/Semantics/atomic02.f90
+++ b/flang/test/Semantics/atomic02.f90
@@ -31,7 +31,7 @@ program test_atomic_and
call atomic_and(non_scalar_coarray, val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_and'
- call atomic_and(non_scalar_coarray[1], val)
+ call atomic_and(non_scalar_coarray(:)[1], val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_and'
call atomic_and(non_coarray, val)
diff --git a/flang/test/Semantics/atomic03.f90 b/flang/test/Semantics/atomic03.f90
index 495df5e..cef21d0 100644
--- a/flang/test/Semantics/atomic03.f90
+++ b/flang/test/Semantics/atomic03.f90
@@ -51,13 +51,13 @@ program test_atomic_cas
call atomic_cas(non_scalar_coarray, old_int, compare_int, new_int)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_cas'
- call atomic_cas(non_scalar_coarray[1], old_int, compare_int, new_int)
+ call atomic_cas(non_scalar_coarray(:)[1], old_int, compare_int, new_int)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_cas'
call atomic_cas(non_scalar_logical_coarray, old_logical, compare_logical, new_logical)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_cas'
- call atomic_cas(non_scalar_logical_coarray[1], old_logical, compare_logical, new_logical)
+ call atomic_cas(non_scalar_logical_coarray(:)[1], old_logical, compare_logical, new_logical)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_cas'
call atomic_cas(non_coarray, old_int, compare_int, new_int)
diff --git a/flang/test/Semantics/atomic04.f90 b/flang/test/Semantics/atomic04.f90
index 9df0b56..453fdb1 100644
--- a/flang/test/Semantics/atomic04.f90
+++ b/flang/test/Semantics/atomic04.f90
@@ -47,13 +47,13 @@ program test_atomic_define
call atomic_define(non_scalar_coarray, val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_define'
- call atomic_define(non_scalar_coarray[1], val)
+ call atomic_define(non_scalar_coarray(:)[1], val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_define'
call atomic_define(non_scalar_logical_coarray, val_logical)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_define'
- call atomic_define(non_scalar_logical_coarray[1], val_logical)
+ call atomic_define(non_scalar_logical_coarray(:)[1], val_logical)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_define'
call atomic_define(non_coarray, val)
diff --git a/flang/test/Semantics/atomic05.f90 b/flang/test/Semantics/atomic05.f90
index 98d6b19..c1e67b0 100644
--- a/flang/test/Semantics/atomic05.f90
+++ b/flang/test/Semantics/atomic05.f90
@@ -41,7 +41,7 @@ program test_atomic_fetch_add
call atomic_fetch_add(array, val, old_val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_fetch_add'
- call atomic_fetch_add(non_scalar_coarray[1], val, old_val)
+ call atomic_fetch_add(non_scalar_coarray(:)[1], val, old_val)
!ERROR: Actual argument for 'atom=' must have kind=atomic_int_kind, but is 'INTEGER(4)'
call atomic_fetch_add(default_kind_coarray, val, old_val)
diff --git a/flang/test/Semantics/atomic06.f90 b/flang/test/Semantics/atomic06.f90
index c6a23dd..57cc81e 100644
--- a/flang/test/Semantics/atomic06.f90
+++ b/flang/test/Semantics/atomic06.f90
@@ -41,7 +41,7 @@ program test_atomic_fetch_and
call atomic_fetch_and(array, val, old_val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_fetch_and'
- call atomic_fetch_and(non_scalar_coarray[1], val, old_val)
+ call atomic_fetch_and(non_scalar_coarray(:)[1], val, old_val)
!ERROR: Actual argument for 'atom=' must have kind=atomic_int_kind, but is 'INTEGER(4)'
call atomic_fetch_and(default_kind_coarray, val, old_val)
diff --git a/flang/test/Semantics/atomic07.f90 b/flang/test/Semantics/atomic07.f90
index 2bc544b..e4d8095 100644
--- a/flang/test/Semantics/atomic07.f90
+++ b/flang/test/Semantics/atomic07.f90
@@ -34,7 +34,7 @@ program test_atomic_fetch_or
call atomic_fetch_or(array, val, old_val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_fetch_or'
- call atomic_fetch_or(non_scalar_coarray[1], val, old_val)
+ call atomic_fetch_or(non_scalar_coarray(:)[1], val, old_val)
!ERROR: Actual argument for 'atom=' must have kind=atomic_int_kind, but is 'INTEGER(4)'
call atomic_fetch_or(default_kind_coarray, val, old_val)
diff --git a/flang/test/Semantics/atomic08.f90 b/flang/test/Semantics/atomic08.f90
index f519f97..234e6e3 100644
--- a/flang/test/Semantics/atomic08.f90
+++ b/flang/test/Semantics/atomic08.f90
@@ -41,7 +41,7 @@ program test_atomic_fetch_xor
call atomic_fetch_xor(array, val, old_val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_fetch_xor'
- call atomic_fetch_xor(non_scalar_coarray[1], val, old_val)
+ call atomic_fetch_xor(non_scalar_coarray(:)[1], val, old_val)
!ERROR: Actual argument for 'atom=' must have kind=atomic_int_kind, but is 'INTEGER(4)'
call atomic_fetch_xor(default_kind_coarray, val, old_val)
diff --git a/flang/test/Semantics/atomic09.f90 b/flang/test/Semantics/atomic09.f90
index e4e0622..4f78ccb 100644
--- a/flang/test/Semantics/atomic09.f90
+++ b/flang/test/Semantics/atomic09.f90
@@ -31,7 +31,7 @@ program test_atomic_or
call atomic_or(non_scalar_coarray, val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_or'
- call atomic_or(non_scalar_coarray[1], val)
+ call atomic_or(non_scalar_coarray(:)[1], val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_or'
call atomic_or(non_coarray, val)
diff --git a/flang/test/Semantics/atomic10.f90 b/flang/test/Semantics/atomic10.f90
index 04efbd6..e206326 100644
--- a/flang/test/Semantics/atomic10.f90
+++ b/flang/test/Semantics/atomic10.f90
@@ -47,13 +47,13 @@ program test_atomic_ref
call atomic_ref(val, non_scalar_coarray)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_ref'
- call atomic_ref(val, non_scalar_coarray[1])
+ call atomic_ref(val, non_scalar_coarray(:)[1])
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_ref'
call atomic_ref(val_logical, non_scalar_logical_coarray)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_ref'
- call atomic_ref(val_logical, non_scalar_logical_coarray[1])
+ call atomic_ref(val_logical, non_scalar_logical_coarray(:)[1])
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_ref'
call atomic_ref(val, non_coarray)
diff --git a/flang/test/Semantics/atomic11.f90 b/flang/test/Semantics/atomic11.f90
index d4f951e..dba7dfd 100644
--- a/flang/test/Semantics/atomic11.f90
+++ b/flang/test/Semantics/atomic11.f90
@@ -31,7 +31,7 @@ program test_atomic_xor
call atomic_xor(non_scalar_coarray, val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_xor'
- call atomic_xor(non_scalar_coarray[1], val)
+ call atomic_xor(non_scalar_coarray(:)[1], val)
!ERROR: 'atom=' argument must be a scalar coarray or coindexed object for intrinsic 'atomic_xor'
call atomic_xor(non_coarray, val)
diff --git a/flang/test/Semantics/bug138915.f90 b/flang/test/Semantics/bug138915.f90
new file mode 100644
index 0000000..786a4ac
--- /dev/null
+++ b/flang/test/Semantics/bug138915.f90
@@ -0,0 +1,15 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1
+module m
+ type base
+ contains
+ procedure, nopass :: tbp
+ end type
+ type, extends(base), abstract :: child
+ contains
+ !ERROR: Override of non-DEFERRED 'tbp' must not be DEFERRED
+ procedure(tbp), deferred, nopass :: tbp
+ end type
+ contains
+ subroutine tbp
+ end
+end
diff --git a/flang/test/Semantics/c_loc01.f90 b/flang/test/Semantics/c_loc01.f90
index abae1e2..a515a7a 100644
--- a/flang/test/Semantics/c_loc01.f90
+++ b/flang/test/Semantics/c_loc01.f90
@@ -48,9 +48,9 @@ module m
cp = c_loc(ch(1:1)) ! ok
cp = c_loc(deferred) ! ok
cp = c_loc(p2ch) ! ok
- !ERROR: PRIVATE name '__address' is only accessible within module '__fortran_builtins'
+ !ERROR: PRIVATE name '__address' is accessible only within module '__fortran_builtins'
cp = c_ptr(0)
- !ERROR: PRIVATE name '__address' is only accessible within module '__fortran_builtins'
+ !ERROR: PRIVATE name '__address' is accessible only within module '__fortran_builtins'
cfp = c_funptr(0)
!ERROR: No intrinsic or user-defined ASSIGNMENT(=) matches operand types TYPE(c_ptr) and TYPE(c_funptr)
cp = cfp
diff --git a/flang/test/Semantics/call07.f90 b/flang/test/Semantics/call07.f90
index 3b5c283..92f2bdb 100644
--- a/flang/test/Semantics/call07.f90
+++ b/flang/test/Semantics/call07.f90
@@ -27,8 +27,10 @@ module m
!PORTABILITY: CONTIGUOUS entity 'scalar' should be an array pointer, assumed-shape, or assumed-rank
real, contiguous :: scalar
call s01(a03) ! ok
- !WARNING: Target of CONTIGUOUS pointer association is not known to be contiguous
+ !ERROR: CONTIGUOUS pointer dummy argument may not be associated with non-CONTIGUOUS pointer actual argument
call s01(a02)
+ !WARNING: Target of CONTIGUOUS pointer association is not known to be contiguous
+ call s01(a02(:))
!ERROR: CONTIGUOUS pointer may not be associated with a discontiguous target
call s01(a03(::2))
call s02(a02) ! ok
diff --git a/flang/test/Semantics/call44.f90 b/flang/test/Semantics/call44.f90
new file mode 100644
index 0000000..f7c4c90
--- /dev/null
+++ b/flang/test/Semantics/call44.f90
@@ -0,0 +1,13 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1 -pedantic -Werror
+subroutine assumedshape(normal, contig)
+ real normal(:)
+ real, contiguous :: contig(:)
+ !WARNING: If the procedure's interface were explicit, this reference would be in error
+ !BECAUSE: Element of assumed-shape array may not be associated with a dummy argument 'assumedsize=' array
+ call seqAssociate(normal(1))
+ !PORTABILITY: Element of contiguous assumed-shape array is accepted for storage sequence association
+ call seqAssociate(contig(1))
+end
+subroutine seqAssociate(assumedSize)
+ real assumedSize(*)
+end
diff --git a/flang/test/Semantics/coarrays02.f90 b/flang/test/Semantics/coarrays02.f90
index dc90716..b16e0cc 100644
--- a/flang/test/Semantics/coarrays02.f90
+++ b/flang/test/Semantics/coarrays02.f90
@@ -96,3 +96,27 @@ module m3
call sub(cat%p)
end
end
+
+subroutine s4
+ type t
+ real, allocatable :: a(:)[:]
+ end type
+ type t2
+ !ERROR: Allocatable or array component 'bad1' may not have a coarray ultimate component '%a'
+ type(t), allocatable :: bad1
+ !ERROR: Pointer 'bad2' may not have a coarray potential component '%a'
+ type(t), pointer :: bad2
+ !ERROR: Allocatable or array component 'bad3' may not have a coarray ultimate component '%a'
+ type(t) :: bad3(2)
+ !ERROR: Component 'bad4' is a coarray and must have the ALLOCATABLE attribute and have a deferred coshape
+ !ERROR: Coarray 'bad4' may not have a coarray potential component '%a'
+ type(t) :: bad4[*]
+ end type
+ type(t), save :: ta(2)
+ !ERROR: 'a' has corank 1, but coindexed reference has 2 cosubscripts
+ print *, ta(1)%a(1)[1,2]
+ !ERROR: An allocatable or pointer component reference must be applied to a scalar base
+ print *, ta(:)%a(1)[1]
+ !ERROR: Subscripts must appear in a coindexed reference when its base is an array
+ print *, ta(1)%a[1]
+end
diff --git a/flang/test/Semantics/coshape.f90 b/flang/test/Semantics/coshape.f90
index d4fb45d..d4e3f2d 100644
--- a/flang/test/Semantics/coshape.f90
+++ b/flang/test/Semantics/coshape.f90
@@ -40,9 +40,9 @@ program coshape_tests
!ERROR: 'coarray=' argument must have corank > 0 for intrinsic 'coshape'
codimensions = coshape(derived_scalar_coarray[1]%x)
!ERROR: 'coarray=' argument must have corank > 0 for intrinsic 'coshape'
- codimensions = coshape(derived_array_coarray[1]%x)
+ codimensions = coshape(derived_array_coarray(:)[1]%x)
!ERROR: 'coarray=' argument must have corank > 0 for intrinsic 'coshape'
- codimensions = coshape(array_coarray[1])
+ codimensions = coshape(array_coarray(:)[1])
!ERROR: 'coarray=' argument must have corank > 0 for intrinsic 'coshape'
codimensions = coshape(scalar_coarray[1])
diff --git a/flang/test/Semantics/error_stop1b.f90 b/flang/test/Semantics/error_stop1b.f90
index 355a049..3c9ace1 100644
--- a/flang/test/Semantics/error_stop1b.f90
+++ b/flang/test/Semantics/error_stop1b.f90
@@ -32,7 +32,7 @@ program test_error_stop
error stop char_array
!ERROR: Must be a scalar value, but is a rank-1 array
- error stop array_coarray[1]
+ error stop array_coarray(:)[1]
!ERROR: Must have LOGICAL type, but is CHARACTER(KIND=1,LEN=128_8)
error stop int_code, quiet=non_logical
diff --git a/flang/test/Semantics/event01b.f90 b/flang/test/Semantics/event01b.f90
index 0cd8a5b..b111187 100644
--- a/flang/test/Semantics/event01b.f90
+++ b/flang/test/Semantics/event01b.f90
@@ -62,7 +62,7 @@ program test_event_post
event post(occurrences)
!ERROR: Must be a scalar value, but is a rank-1 array
- event post(occurrences[1])
+ event post(occurrences(:)[1])
!______ invalid sync-stat-lists: invalid stat= ____________
diff --git a/flang/test/Semantics/io11.f90 b/flang/test/Semantics/io11.f90
index 3529929..c00deed 100644
--- a/flang/test/Semantics/io11.f90
+++ b/flang/test/Semantics/io11.f90
@@ -342,7 +342,7 @@ contains
end subroutine
end module m15
-module m16
+module m16a
type,public :: t
integer c
contains
@@ -355,15 +355,58 @@ contains
class(t), intent(inout) :: dtv
integer, intent(in) :: unit
character(len=*), intent(in) :: iotype
- !ERROR: Dummy argument 'vlist' of a defined input/output procedure must be assumed shape
+ !ERROR: Dummy argument 'vlist' of a defined input/output procedure must be assumed shape vector
integer, intent(in) :: vlist(5)
integer, intent(out) :: iostat
character(len=*), intent(inout) :: iomsg
+ iostat = 343
+ stop 'fail'
+ end subroutine
+end module m16a
+module m16b
+ type,public :: t
+ integer c
+ contains
+ procedure, pass :: tbp=>formattedReadProc
+ generic :: read(formatted) => tbp
+ end type
+ private
+contains
+ subroutine formattedReadProc(dtv, unit, iotype, vlist, iostat, iomsg)
+ class(t), intent(inout) :: dtv
+ integer, intent(in) :: unit
+ character(len=*), intent(in) :: iotype
+ !ERROR: Dummy argument 'vlist' of a defined input/output procedure must be assumed shape vector
+ integer, intent(in) :: vlist(:,:)
+ integer, intent(out) :: iostat
+ character(len=*), intent(inout) :: iomsg
+ iostat = 343
+ stop 'fail'
+ end subroutine
+end module m16b
+
+module m16c
+ type,public :: t
+ integer c
+ contains
+ procedure, pass :: tbp=>formattedReadProc
+ generic :: read(formatted) => tbp
+ end type
+ private
+contains
+ subroutine formattedReadProc(dtv, unit, iotype, vlist, iostat, iomsg)
+ class(t), intent(inout) :: dtv
+ integer, intent(in) :: unit
+ character(len=*), intent(in) :: iotype
+ !ERROR: Dummy argument 'vlist' may not be assumed-rank
+ integer, intent(in) :: vlist(..)
+ integer, intent(out) :: iostat
+ character(len=*), intent(inout) :: iomsg
iostat = 343
stop 'fail'
end subroutine
-end module m16
+end module m16c
module m17
! Test the same defined input/output procedure specified as a generic
diff --git a/flang/test/Semantics/misc-intrinsics.f90 b/flang/test/Semantics/misc-intrinsics.f90
index 14dcdb0..a7895f7 100644
--- a/flang/test/Semantics/misc-intrinsics.f90
+++ b/flang/test/Semantics/misc-intrinsics.f90
@@ -3,17 +3,37 @@
program test_size
real :: scalar
real, dimension(5, 5) :: array
- call test(array, array)
+ call test(array, array, array)
contains
- subroutine test(arg, assumedRank)
+ subroutine test(arg, assumedRank, poly)
real, dimension(5, *) :: arg
real, dimension(..) :: assumedRank
+ class(*) :: poly(5, *)
!ERROR: A dim= argument is required for 'size' when the array is assumed-size
print *, size(arg)
+ print *, size(arg, dim=1) ! ok
+ select type (poly)
+ type is (real)
+ !ERROR: A dim= argument is required for 'size' when the array is assumed-size
+ print *, size(poly)
+ print *, size(poly, dim=1) ! ok
+ end select
!ERROR: A dim= argument is required for 'ubound' when the array is assumed-size
print *, ubound(arg)
+ print *, ubound(arg, dim=1) ! ok
+ select type (poly)
+ type is (real)
+ !ERROR: A dim= argument is required for 'ubound' when the array is assumed-size
+ print *, ubound(poly)
+ print *, ubound(poly, dim=1) ! ok
+ end select
!ERROR: The 'source=' argument to the intrinsic function 'shape' may not be assumed-size
print *, shape(arg)
+ select type (poly)
+ type is (real)
+ !ERROR: The 'source=' argument to the intrinsic function 'shape' may not be assumed-size
+ print *, shape(poly)
+ end select
!ERROR: The 'harvest=' argument to the intrinsic procedure 'random_number' may not be assumed-size
call random_number(arg)
!ERROR: 'array=' argument has unacceptable rank 0
@@ -85,5 +105,16 @@ program test_size
print *, lbound(assumedRank, dim=2)
print *, ubound(assumedRank, dim=2)
end select
+ contains
+ subroutine inner
+ !ERROR: A dim= argument is required for 'size' when the array is assumed-size
+ print *, size(arg)
+ print *, size(arg, dim=1) ! ok
+ !ERROR: A dim= argument is required for 'ubound' when the array is assumed-size
+ print *, ubound(arg)
+ print *, ubound(arg, dim=1) ! ok
+ !ERROR: The 'source=' argument to the intrinsic function 'shape' may not be assumed-size
+ print *, shape(arg)
+ end
end subroutine
end
diff --git a/flang/test/Semantics/modfile75.F90 b/flang/test/Semantics/modfile75.F90
new file mode 100644
index 0000000..aba00ff
--- /dev/null
+++ b/flang/test/Semantics/modfile75.F90
@@ -0,0 +1,17 @@
+!RUN: %flang -c -fhermetic-module-files -DWHICH=1 %s && %flang -c -fhermetic-module-files -DWHICH=2 %s && %flang_fc1 -fdebug-unparse %s | FileCheck %s
+
+#if WHICH == 1
+module modfile75a
+ use iso_c_binding
+end
+#elif WHICH == 2
+module modfile75b
+ use modfile75a
+end
+#else
+program test
+ use modfile75b
+!CHECK: INTEGER(KIND=4_4) n
+ integer(c_int) n
+end
+#endif
diff --git a/flang/test/Semantics/resolve34.f90 b/flang/test/Semantics/resolve34.f90
index 39709a3..da1b80b 100644
--- a/flang/test/Semantics/resolve34.f90
+++ b/flang/test/Semantics/resolve34.f90
@@ -90,16 +90,37 @@ module m7
integer :: i2
integer, private :: i3
end type
+ type :: t3
+ private
+ integer :: i4 = 0
+ procedure(real), pointer, nopass :: pp1 => null()
+ end type
+ type, extends(t3) :: t4
+ private
+ integer :: i5
+ procedure(real), pointer, nopass :: pp2
+ end type
end
subroutine s7
use m7
type(t2) :: x
+ type(t4) :: y
integer :: j
j = x%i2
- !ERROR: PRIVATE name 'i3' is only accessible within module 'm7'
+ !ERROR: PRIVATE name 'i3' is accessible only within module 'm7'
j = x%i3
- !ERROR: PRIVATE name 't1' is only accessible within module 'm7'
+ !ERROR: PRIVATE name 't1' is accessible only within module 'm7'
j = x%t1%i1
+ !ok, parent component is not affected by PRIVATE in t4
+ y%t3 = t3()
+ !ERROR: PRIVATE name 'i4' is accessible only within module 'm7'
+ y%i4 = 0
+ !ERROR: PRIVATE name 'pp1' is accessible only within module 'm7'
+ y%pp1 => null()
+ !ERROR: PRIVATE name 'i5' is accessible only within module 'm7'
+ y%i5 = 0
+ !ERROR: PRIVATE name 'pp2' is accessible only within module 'm7'
+ y%pp2 => null()
end
! 7.5.4.8(2)
@@ -122,11 +143,11 @@ end
subroutine s8
use m8
type(t) :: x
- !ERROR: PRIVATE name 'i2' is only accessible within module 'm8'
+ !ERROR: PRIVATE name 'i2' is accessible only within module 'm8'
x = t(2, 5)
- !ERROR: PRIVATE name 'i2' is only accessible within module 'm8'
+ !ERROR: PRIVATE name 'i2' is accessible only within module 'm8'
x = t(i1=2, i2=5)
- !ERROR: PRIVATE name 'i2' is only accessible within module 'm8'
+ !ERROR: PRIVATE name 'i2' is accessible only within module 'm8'
a = [y%i2]
end
@@ -166,6 +187,6 @@ subroutine s10
use m10
type(t) x
x = t(1)
- !ERROR: PRIVATE name 'operator(+)' is only accessible within module 'm10'
+ !ERROR: PRIVATE name 'operator(+)' is accessible only within module 'm10'
x = x + x
end subroutine
diff --git a/flang/test/Semantics/resolve94.f90 b/flang/test/Semantics/resolve94.f90
index 75755fb..1d0b106 100644
--- a/flang/test/Semantics/resolve94.f90
+++ b/flang/test/Semantics/resolve94.f90
@@ -35,7 +35,7 @@ subroutine s1()
rVar1 = rCoarray[1,intArray,3]
! OK
rVar1 = rCoarray[1,2,3,STAT=iVar1, TEAM=team2]
- !ERROR: Team value must be of type TEAM_TYPE from module ISO_FORTRAN_ENV
+ !ERROR: TEAM= specifier must have type TEAM_TYPE from ISO_FORTRAN_ENV
rVar1 = rCoarray[1,2,3,STAT=iVar1, TEAM=2]
! OK
rVar1 = rCoarray[1,2,3,STAT=iVar1, TEAM_NUMBER=38]
@@ -48,12 +48,12 @@ subroutine s1()
!ERROR: Must be a scalar value, but is a rank-1 array
rVar1 = rCoarray[1,2,3,STAT=intArray]
! Error on C929, no specifier can appear more than once
- !ERROR: STAT variable can only be specified once
+ !ERROR: coindexed reference has multiple STAT= specifiers
rVar1 = rCoarray[1,2,3,STAT=iVar1, STAT=iVar2]
! OK
rVar1 = rCoarray[1,2,3,TEAM=team1]
! Error on C929, no specifier can appear more than once
- !ERROR: TEAM value can only be specified once
+ !ERROR: coindexed reference has multiple TEAM= or TEAM_NUMBER= specifiers
rVar1 = rCoarray[1,2,3,TEAM=team1, TEAM=team2]
! OK
rVar1 = rCoarray[1,2,3,TEAM_NUMBER=37]
@@ -66,11 +66,11 @@ subroutine s1()
!ERROR: Must have INTEGER type, but is REAL(4)
rVar1 = rCoarray[1,2,3,TEAM_NUMBER=3.7]
! Error on C929, no specifier can appear more than once
- !ERROR: TEAM_NUMBER value can only be specified once
+ !ERROR: coindexed reference has multiple TEAM= or TEAM_NUMBER= specifiers
rVar1 = rCoarray[1,2,3,TEAM_NUMBER=37, TEAM_NUMBER=37]
- !ERROR: Cannot specify both TEAM and TEAM_NUMBER
+ !ERROR: coindexed reference has multiple TEAM= or TEAM_NUMBER= specifiers
rVar1 = rCoarray[1,2,3,TEAM=team1, TEAM_NUMBER=37]
- !ERROR: Cannot specify both TEAM and TEAM_NUMBER
+ !ERROR: coindexed reference has multiple TEAM= or TEAM_NUMBER= specifiers
rVar1 = rCoarray[1,2,3,TEAM_number=43, TEAM=team1]
! OK for a STAT variable to be a coarray integer
rVar1 = rCoarray[1,2,3,stat=intScalarCoarray]
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index b264dcb4..f21fc2f 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -220,6 +220,7 @@ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR)
if(LIBC_LIBDIR_SUBDIR)
string(APPEND LIBC_TARGET_SUBDIR /${LIBC_LIBDIR_SUBDIR})
endif()
+ cmake_path(NORMAL_PATH LIBC_TARGET_SUBDIR)
endif()
if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND (LIBC_ENABLE_USE_BY_CLANG OR LIBC_TARGET_OS_IS_GPU))
diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt
index 50ddfc39..b0e7aac 100644
--- a/libclc/CMakeLists.txt
+++ b/libclc/CMakeLists.txt
@@ -20,7 +20,6 @@ include( GNUInstallDirs )
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS
amdgcn-amdhsa/lib/SOURCES;
amdgcn/lib/SOURCES;
- amdgpu/lib/SOURCES;
clspv/lib/SOURCES;
generic/lib/SOURCES;
ptx-nvidiacl/lib/SOURCES;
diff --git a/libclc/amdgpu/lib/SOURCES b/libclc/amdgpu/lib/SOURCES
deleted file mode 100644
index ab5da40..0000000
--- a/libclc/amdgpu/lib/SOURCES
+++ /dev/null
@@ -1,9 +0,0 @@
-math/half_exp.cl
-math/half_exp10.cl
-math/half_exp2.cl
-math/half_log.cl
-math/half_log10.cl
-math/half_log2.cl
-math/half_recip.cl
-math/half_rsqrt.cl
-math/half_sqrt.cl
diff --git a/libclc/clc/include/clc/math/clc_cos.h b/libclc/clc/include/clc/math/clc_cos.h
new file mode 100644
index 0000000..4468160
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_cos.h
@@ -0,0 +1,19 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_COS_H__
+#define __CLC_MATH_CLC_COS_H__
+
+#define __CLC_BODY <clc/math/unary_decl.inc>
+#define __CLC_FUNCTION __clc_cos
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+
+#endif // __CLC_MATH_CLC_COS_H__
diff --git a/libclc/clc/include/clc/math/clc_half_cos.h b/libclc/clc/include/clc/math/clc_half_cos.h
new file mode 100644
index 0000000..d0efc45
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_cos.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_COS_H__
+#define __CLC_MATH_CLC_HALF_COS_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_cos
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_COS_H__
diff --git a/libclc/clc/include/clc/math/clc_half_divide.h b/libclc/clc/include/clc/math/clc_half_divide.h
new file mode 100644
index 0000000..5d1e7b9
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_divide.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_DIVIDE_H__
+#define __CLC_MATH_CLC_HALF_DIVIDE_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_divide
+#define __CLC_BODY <clc/shared/binary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_DIVIDE_H__
diff --git a/libclc/clc/include/clc/math/clc_half_exp.h b/libclc/clc/include/clc/math/clc_half_exp.h
new file mode 100644
index 0000000..e95f3d4
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_exp.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_EXP_H__
+#define __CLC_MATH_CLC_HALF_EXP_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_exp
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_EXP_H__
diff --git a/libclc/clc/include/clc/math/clc_half_exp10.h b/libclc/clc/include/clc/math/clc_half_exp10.h
new file mode 100644
index 0000000..e4cce6e
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_exp10.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_EXP10_H__
+#define __CLC_MATH_CLC_HALF_EXP10_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_exp10
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_EXP10_H__
diff --git a/libclc/clc/include/clc/math/clc_half_exp2.h b/libclc/clc/include/clc/math/clc_half_exp2.h
new file mode 100644
index 0000000..6fc9973
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_exp2.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_EXP2_H__
+#define __CLC_MATH_CLC_HALF_EXP2_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_exp2
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_EXP2_H__
diff --git a/libclc/clc/include/clc/math/clc_half_log.h b/libclc/clc/include/clc/math/clc_half_log.h
new file mode 100644
index 0000000..e44e686
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_log.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_LOG_H__
+#define __CLC_MATH_CLC_HALF_LOG_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_log
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_LOG_H__
diff --git a/libclc/clc/include/clc/math/clc_half_log10.h b/libclc/clc/include/clc/math/clc_half_log10.h
new file mode 100644
index 0000000..23e0ba1
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_log10.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_LOG10_H__
+#define __CLC_MATH_CLC_HALF_LOG10_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_log10
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_LOG10_H__
diff --git a/libclc/clc/include/clc/math/clc_half_log2.h b/libclc/clc/include/clc/math/clc_half_log2.h
new file mode 100644
index 0000000..8ea2439
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_log2.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_LOG2_H__
+#define __CLC_MATH_CLC_HALF_LOG2_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_log2
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_LOG2_H__
diff --git a/libclc/clc/include/clc/math/clc_half_powr.h b/libclc/clc/include/clc/math/clc_half_powr.h
new file mode 100644
index 0000000..252bfcd7
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_powr.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_POWR_H__
+#define __CLC_MATH_CLC_HALF_POWR_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_powr
+#define __CLC_BODY <clc/shared/binary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_POWR_H__
diff --git a/libclc/clc/include/clc/math/clc_half_recip.h b/libclc/clc/include/clc/math/clc_half_recip.h
new file mode 100644
index 0000000..1adb0bb
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_recip.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_RECIP_H__
+#define __CLC_MATH_CLC_HALF_RECIP_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_recip
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_RECIP_H__
diff --git a/libclc/clc/include/clc/math/clc_half_rsqrt.h b/libclc/clc/include/clc/math/clc_half_rsqrt.h
new file mode 100644
index 0000000..6342739
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_rsqrt.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_RSQRT_H__
+#define __CLC_MATH_CLC_HALF_RSQRT_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_rsqrt
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_RSQRT_H__
diff --git a/libclc/clc/include/clc/math/clc_half_sin.h b/libclc/clc/include/clc/math/clc_half_sin.h
new file mode 100644
index 0000000..31378d2
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_sin.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_SIN_H__
+#define __CLC_MATH_CLC_HALF_SIN_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_sin
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_SIN_H__
diff --git a/libclc/clc/include/clc/math/clc_half_sqrt.h b/libclc/clc/include/clc/math/clc_half_sqrt.h
new file mode 100644
index 0000000..0e765fb
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_sqrt.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_SQRT_H__
+#define __CLC_MATH_CLC_HALF_SQRT_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_sqrt
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_SQRT_H__
diff --git a/libclc/clc/include/clc/math/clc_half_tan.h b/libclc/clc/include/clc/math/clc_half_tan.h
new file mode 100644
index 0000000..6c890a9
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_half_tan.h
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_HALF_TAN_H__
+#define __CLC_MATH_CLC_HALF_TAN_H__
+
+#define __FLOAT_ONLY
+#define __CLC_FUNCTION __clc_half_tan
+#define __CLC_BODY <clc/shared/unary_decl.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+#undef __FLOAT_ONLY
+
+#endif // __CLC_MATH_CLC_HALF_TAN_H__
diff --git a/libclc/clc/include/clc/math/clc_sin.h b/libclc/clc/include/clc/math/clc_sin.h
new file mode 100644
index 0000000..de4c722
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_sin.h
@@ -0,0 +1,19 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_SIN_H__
+#define __CLC_MATH_CLC_SIN_H__
+
+#define __CLC_BODY <clc/math/unary_decl.inc>
+#define __CLC_FUNCTION __clc_sin
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+
+#endif // __CLC_MATH_CLC_SIN_H__
diff --git a/libclc/clc/include/clc/math/clc_sincos.h b/libclc/clc/include/clc/math/clc_sincos.h
new file mode 100644
index 0000000..e26dc7c
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_sincos.h
@@ -0,0 +1,19 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLC_MATH_CLC_SINCOS_H__
+#define __CLC_MATH_CLC_SINCOS_H__
+
+#define __CLC_BODY <clc/math/unary_decl_with_ptr.inc>
+#define __CLC_FUNCTION __clc_sincos
+
+#include <clc/math/gentype.inc>
+
+#undef __CLC_FUNCTION
+
+#endif // __CLC_MATH_CLC_SINCOS_H__
diff --git a/libclc/clc/include/clc/math/clc_sincos_helpers.h b/libclc/clc/include/clc/math/clc_sincos_helpers.h
index 8029d43..e3a2e1c 100644
--- a/libclc/clc/include/clc/math/clc_sincos_helpers.h
+++ b/libclc/clc/include/clc/math/clc_sincos_helpers.h
@@ -16,4 +16,11 @@
#undef __FLOAT_ONLY
+#define __DOUBLE_ONLY
+#define __CLC_BODY <clc/math/clc_sincos_helpers_fp64.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __DOUBLE_ONLY
+
#endif // __CLC_MATH_CLC_SINCOS_HELPERS_H__
diff --git a/libclc/clc/include/clc/math/clc_sincos_helpers_fp64.inc b/libclc/clc/include/clc/math/clc_sincos_helpers_fp64.inc
new file mode 100644
index 0000000..09c6e1c
--- /dev/null
+++ b/libclc/clc/include/clc/math/clc_sincos_helpers_fp64.inc
@@ -0,0 +1,17 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+_CLC_DECL _CLC_OVERLOAD void
+__clc_remainder_piby2_medium(__CLC_DOUBLEN x, private __CLC_DOUBLEN *r,
+ private __CLC_DOUBLEN *rr,
+ private __CLC_INTN *regn);
+
+_CLC_DECL _CLC_OVERLOAD void
+__clc_remainder_piby2_large(__CLC_DOUBLEN x, private __CLC_DOUBLEN *r,
+ private __CLC_DOUBLEN *rr,
+ private __CLC_INTN *regn);
diff --git a/libclc/generic/include/math/clc_tan.h b/libclc/clc/include/clc/math/clc_tan.h
index 52c0a34..028ff28 100644
--- a/libclc/generic/include/math/clc_tan.h
+++ b/libclc/clc/include/clc/math/clc_tan.h
@@ -6,7 +6,14 @@
//
//===----------------------------------------------------------------------===//
-#define __CLC_FUNCTION __clc_tan
+#ifndef __CLC_MATH_CLC_TAN_H__
+#define __CLC_MATH_CLC_TAN_H__
+
#define __CLC_BODY <clc/math/unary_decl.inc>
+#define __CLC_FUNCTION __clc_tan
+
#include <clc/math/gentype.inc>
+
#undef __CLC_FUNCTION
+
+#endif // __CLC_MATH_CLC_TAN_H__
diff --git a/libclc/clc/include/clc/math/tables.h b/libclc/clc/include/clc/math/tables.h
index fb172b0..0fec778 100644
--- a/libclc/clc/include/clc/math/tables.h
+++ b/libclc/clc/include/clc/math/tables.h
@@ -61,7 +61,6 @@
TABLE_FUNCTION_DECL(float2, log2_tbl);
TABLE_FUNCTION_DECL(float2, log10_tbl);
-TABLE_FUNCTION_DECL(uint4, pibits_tbl);
CLC_TABLE_FUNCTION_DECL(float, log_inv_tbl_ep_head);
CLC_TABLE_FUNCTION_DECL(float, log_inv_tbl_ep_tail);
@@ -75,6 +74,7 @@ CLC_TABLE_FUNCTION_DECL(float, cbrt_tbl_head);
CLC_TABLE_FUNCTION_DECL(float, cbrt_tbl_tail);
CLC_TABLE_FUNCTION_DECL(float, sinhcosh_tbl_head);
CLC_TABLE_FUNCTION_DECL(float, sinhcosh_tbl_tail);
+CLC_TABLE_FUNCTION_DECL(ulong, pibits_tbl);
#ifdef cl_khr_fp64
diff --git a/libclc/clc/lib/amdgpu/SOURCES b/libclc/clc/lib/amdgpu/SOURCES
index 31e07b6..83b13ba 100644
--- a/libclc/clc/lib/amdgpu/SOURCES
+++ b/libclc/clc/lib/amdgpu/SOURCES
@@ -1,3 +1,12 @@
+math/clc_half_exp10.cl
+math/clc_half_exp2.cl
+math/clc_half_exp.cl
+math/clc_half_log10.cl
+math/clc_half_log2.cl
+math/clc_half_log.cl
+math/clc_half_recip.cl
+math/clc_half_rsqrt.cl
+math/clc_half_sqrt.cl
math/clc_native_exp2.cl
math/clc_native_exp.cl
math/clc_native_log10.cl
diff --git a/libclc/amdgpu/lib/math/half_exp.cl b/libclc/clc/lib/amdgpu/math/clc_half_exp.cl
index 4cc2b36..5fa9d0b 100644
--- a/libclc/amdgpu/lib/math/half_exp.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_exp.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_exp.h>
+
#define __CLC_FUNC exp
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_exp10.cl b/libclc/clc/lib/amdgpu/math/clc_half_exp10.cl
index 3bcdd24..5c119eb 100644
--- a/libclc/amdgpu/lib/math/half_exp10.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_exp10.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_exp10.h>
+
#define __CLC_FUNC exp10
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/amdgpu/math/clc_half_exp2.cl b/libclc/clc/lib/amdgpu/math/clc_half_exp2.cl
new file mode 100644
index 0000000..9750d50
--- /dev/null
+++ b/libclc/clc/lib/amdgpu/math/clc_half_exp2.cl
@@ -0,0 +1,15 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/internal/clc.h>
+#include <clc/math/clc_native_exp2.h>
+
+#define __CLC_FUNC exp2
+#define __FLOAT_ONLY
+#define __CLC_BODY <clc_half_native_unary.inc>
+#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_log.cl b/libclc/clc/lib/amdgpu/math/clc_half_log.cl
index d0e924c..ea19bd5 100644
--- a/libclc/amdgpu/lib/math/half_log.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_log.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_log.h>
+
#define __CLC_FUNC log
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_log10.cl b/libclc/clc/lib/amdgpu/math/clc_half_log10.cl
index 2f3b26e..af3a269 100644
--- a/libclc/amdgpu/lib/math/half_log10.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_log10.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_log10.h>
+
#define __CLC_FUNC log10
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_log2.cl b/libclc/clc/lib/amdgpu/math/clc_half_log2.cl
index 9b417fb..81795f01 100644
--- a/libclc/amdgpu/lib/math/half_log2.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_log2.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_log2.h>
+
#define __CLC_FUNC log2
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_native_unary.inc b/libclc/clc/lib/amdgpu/math/clc_half_native_unary.inc
index 6ecad7e..d623434 100644
--- a/libclc/amdgpu/lib/math/half_native_unary.inc
+++ b/libclc/clc/lib/amdgpu/math/clc_half_native_unary.inc
@@ -8,11 +8,12 @@
#include <clc/utils.h>
-#define __CLC_HALF_FUNC(x) __CLC_CONCAT(half_, x)
-#define __CLC_NATIVE_FUNC(x) __CLC_CONCAT(native_, x)
+#define __CLC_HALF_FUNC(x) __CLC_CONCAT(__clc_half_, x)
+#define __CLC_NATIVE_FUNC(x) __CLC_CONCAT(__clc_native_, x)
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_HALF_FUNC(__CLC_FUNC)(__CLC_GENTYPE val) {
- return __CLC_NATIVE_FUNC(__CLC_FUNC)(val);
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE
+__CLC_HALF_FUNC(__CLC_FUNC)(__CLC_GENTYPE val) {
+ return __CLC_NATIVE_FUNC(__CLC_FUNC)(val);
}
#undef __CLC_NATIVE_FUNC
diff --git a/libclc/amdgpu/lib/math/half_recip.cl b/libclc/clc/lib/amdgpu/math/clc_half_recip.cl
index 87d56c8..edbec07 100644
--- a/libclc/amdgpu/lib/math/half_recip.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_recip.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_recip.h>
+
#define __CLC_FUNC recip
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_rsqrt.cl b/libclc/clc/lib/amdgpu/math/clc_half_rsqrt.cl
index 099363a..c0a5489 100644
--- a/libclc/amdgpu/lib/math/half_rsqrt.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_rsqrt.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_rsqrt.h>
+
#define __CLC_FUNC rsqrt
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_sqrt.cl b/libclc/clc/lib/amdgpu/math/clc_half_sqrt.cl
index f7ae5b4..4dc6fa3 100644
--- a/libclc/amdgpu/lib/math/half_sqrt.cl
+++ b/libclc/clc/lib/amdgpu/math/clc_half_sqrt.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
+#include <clc/math/clc_native_sqrt.h>
+
#define __CLC_FUNC sqrt
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_native_unary.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/SOURCES b/libclc/clc/lib/generic/SOURCES
index 4240e7b..2fffc5b 100644
--- a/libclc/clc/lib/generic/SOURCES
+++ b/libclc/clc/lib/generic/SOURCES
@@ -32,6 +32,7 @@ math/clc_atanpi.cl
math/clc_cbrt.cl
math/clc_ceil.cl
math/clc_copysign.cl
+math/clc_cos.cl
math/clc_cosh.cl
math/clc_cospi.cl
math/clc_ep_log.cl
@@ -49,6 +50,20 @@ math/clc_floor.cl
math/clc_fmod.cl
math/clc_fract.cl
math/clc_frexp.cl
+math/clc_half_cos.cl
+math/clc_half_divide.cl
+math/clc_half_exp.cl
+math/clc_half_exp10.cl
+math/clc_half_exp2.cl
+math/clc_half_log.cl
+math/clc_half_log10.cl
+math/clc_half_log2.cl
+math/clc_half_powr.cl
+math/clc_half_rsqrt.cl
+math/clc_half_recip.cl
+math/clc_half_sin.cl
+math/clc_half_sqrt.cl
+math/clc_half_tan.cl
math/clc_hypot.cl
math/clc_ldexp.cl
math/clc_lgamma.cl
@@ -86,12 +101,15 @@ math/clc_rint.cl
math/clc_rootn.cl
math/clc_round.cl
math/clc_rsqrt.cl
+math/clc_sin.cl
+math/clc_sincos.cl
math/clc_sincos_helpers.cl
math/clc_sinh.cl
math/clc_sinpi.cl
math/clc_sqrt.cl
math/clc_sw_fma.cl
math/clc_tables.cl
+math/clc_tan.cl
math/clc_tanh.cl
math/clc_tanpi.cl
math/clc_tgamma.cl
diff --git a/libclc/clc/lib/generic/math/clc_cos.cl b/libclc/clc/lib/generic/math/clc_cos.cl
new file mode 100644
index 0000000..0c9dc28
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_cos.cl
@@ -0,0 +1,21 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/clc_convert.h>
+#include <clc/clcmacro.h>
+#include <clc/float/definitions.h>
+#include <clc/math/clc_fabs.h>
+#include <clc/math/clc_sincos_helpers.h>
+#include <clc/math/clc_sincos_piby4.h>
+#include <clc/math/math.h>
+#include <clc/relational/clc_isinf.h>
+#include <clc/relational/clc_isnan.h>
+#include <clc/relational/clc_select.h>
+
+#define __CLC_BODY <clc_cos.inc>
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_cos.inc b/libclc/clc/lib/generic/math/clc_cos.inc
new file mode 100644
index 0000000..4b8108c
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_cos.inc
@@ -0,0 +1,63 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if __CLC_FPSIZE == 32
+
+_CLC_OVERLOAD _CLC_DEF __CLC_FLOATN __clc_cos(__CLC_FLOATN x) {
+ __CLC_FLOATN absx = __clc_fabs(x);
+
+ __CLC_FLOATN r0, r1;
+ __CLC_INTN regn = __clc_argReductionS(&r0, &r1, absx);
+
+ __CLC_FLOATN ss = -__clc_sinf_piby4(r0, r1);
+ __CLC_FLOATN cc = __clc_cosf_piby4(r0, r1);
+
+ __CLC_FLOATN c = (regn & 1) != 0 ? ss : cc;
+ c = __CLC_AS_FLOATN(__CLC_AS_INTN(c) ^ ((regn > 1) << 31));
+
+ c = __clc_select(c, __CLC_GENTYPE_NAN, __clc_isnan(x) || __clc_isinf(x));
+
+ return c;
+}
+
+#elif __CLC_FPSIZE == 16
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_cos(__CLC_GENTYPE x) {
+ return __CLC_CONVERT_GENTYPE(__clc_cos(__CLC_CONVERT_FLOATN(x)));
+}
+
+#elif __CLC_FPSIZE == 64
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_cos(__CLC_GENTYPE x) {
+ __CLC_GENTYPE absx = __clc_fabs(x);
+
+ __CLC_BIT_INTN is_medium = absx < 0x1.0p+47;
+
+ __CLC_INTN regn_m, regn_l;
+ __CLC_GENTYPE r_m, r_l, rr_m, rr_l;
+
+ __clc_remainder_piby2_medium(absx, &r_m, &rr_m, &regn_m);
+ __clc_remainder_piby2_large(absx, &r_l, &rr_l, &regn_l);
+
+ __CLC_GENTYPE r = is_medium ? r_m : r_l;
+ __CLC_GENTYPE rr = is_medium ? rr_m : rr_l;
+ __CLC_INTN regn = __CLC_CONVERT_INTN(is_medium) ? regn_m : regn_l;
+
+ __CLC_GENTYPE sinval, cosval;
+ __clc_sincos_piby4(r, rr, &sinval, &cosval);
+ sinval = -sinval;
+
+ __CLC_LONGN c =
+ __CLC_AS_LONGN(__CLC_CONVERT_BIT_INTN((regn & 1) != 0) ? sinval : cosval);
+ c ^= __CLC_CONVERT_BIT_INTN(regn > 1) << 63;
+
+ return __clc_isnan(absx) | __clc_isinf(absx) ? __CLC_GENTYPE_NAN
+ : __CLC_AS_GENTYPE(c);
+}
+
+#endif
diff --git a/libclc/clc/lib/generic/math/clc_half_cos.cl b/libclc/clc/lib/generic/math/clc_half_cos.cl
new file mode 100644
index 0000000..c2064a0
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_cos.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_cos.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_cos
+#define __CLC_FUNCTION(x) __clc_cos
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/amdgpu/lib/math/half_exp2.cl b/libclc/clc/lib/generic/math/clc_half_divide.cl
index d9efb27a..88676a4 100644
--- a/libclc/amdgpu/lib/math/half_exp2.cl
+++ b/libclc/clc/lib/generic/math/clc_half_divide.cl
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/clc.h>
-
-#define __CLC_FUNC exp2
+#include <clc/internal/clc.h>
+
#define __FLOAT_ONLY
-#define __CLC_BODY <half_native_unary.inc>
+#define __CLC_BODY <clc_half_divide.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_divide.inc b/libclc/clc/lib/generic/math/clc_half_divide.inc
new file mode 100644
index 0000000..27fdb18
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_divide.inc
@@ -0,0 +1,12 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_half_divide(__CLC_GENTYPE x,
+ __CLC_GENTYPE y) {
+ return x / y;
+}
diff --git a/libclc/clc/lib/generic/math/clc_half_exp.cl b/libclc/clc/lib/generic/math/clc_half_exp.cl
new file mode 100644
index 0000000..88336f9
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_exp.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_exp.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_exp
+#define __CLC_FUNCTION(x) __clc_exp
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_exp10.cl b/libclc/clc/lib/generic/math/clc_half_exp10.cl
new file mode 100644
index 0000000..b3c0001
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_exp10.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_exp10.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_exp10
+#define __CLC_FUNCTION(x) __clc_exp10
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_exp2.cl b/libclc/clc/lib/generic/math/clc_half_exp2.cl
new file mode 100644
index 0000000..f3ffcc6
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_exp2.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_exp2.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_exp2
+#define __CLC_FUNCTION(x) __clc_exp2
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_log.cl b/libclc/clc/lib/generic/math/clc_half_log.cl
new file mode 100644
index 0000000..7371344
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_log.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_log.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_log
+#define __CLC_FUNCTION(x) __clc_log
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_log10.cl b/libclc/clc/lib/generic/math/clc_half_log10.cl
new file mode 100644
index 0000000..e9becad
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_log10.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_log10.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_log10
+#define __CLC_FUNCTION(x) __clc_log10
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_log2.cl b/libclc/clc/lib/generic/math/clc_half_log2.cl
new file mode 100644
index 0000000..aa8d838
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_log2.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_log2.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_log2
+#define __CLC_FUNCTION(x) __clc_log2
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_powr.cl b/libclc/clc/lib/generic/math/clc_half_powr.cl
new file mode 100644
index 0000000..ade6731
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_powr.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_powr.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_powr
+#define __CLC_FUNCTION(x) __clc_powr
+#define __CLC_BODY <clc/shared/binary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_recip.cl b/libclc/clc/lib/generic/math/clc_half_recip.cl
new file mode 100644
index 0000000..0ae1e92
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_recip.cl
@@ -0,0 +1,12 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define __FLOAT_ONLY
+#define __CLC_BODY <clc_half_recip.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_recip.inc b/libclc/clc/lib/generic/math/clc_half_recip.inc
new file mode 100644
index 0000000..24ededc
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_recip.inc
@@ -0,0 +1,11 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_half_recip(__CLC_GENTYPE val) {
+ return 1.0f / val;
+}
diff --git a/libclc/clc/lib/generic/math/clc_half_rsqrt.cl b/libclc/clc/lib/generic/math/clc_half_rsqrt.cl
new file mode 100644
index 0000000..07daf7f
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_rsqrt.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_rsqrt.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_rsqrt
+#define __CLC_FUNCTION(x) __clc_rsqrt
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_sin.cl b/libclc/clc/lib/generic/math/clc_half_sin.cl
new file mode 100644
index 0000000..c0af5e7
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_sin.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_sin.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_sin
+#define __CLC_FUNCTION(x) __clc_sin
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_sqrt.cl b/libclc/clc/lib/generic/math/clc_half_sqrt.cl
new file mode 100644
index 0000000..b6b5097
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_sqrt.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_sqrt.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_sqrt
+#define __CLC_FUNCTION(x) __clc_sqrt
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_half_tan.cl b/libclc/clc/lib/generic/math/clc_half_tan.cl
new file mode 100644
index 0000000..ca27f3c
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_half_tan.cl
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/math/clc_tan.h>
+
+#define __FLOAT_ONLY
+#define FUNCTION __clc_half_tan
+#define __CLC_FUNCTION(x) __clc_tan
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_sin.cl b/libclc/clc/lib/generic/math/clc_sin.cl
new file mode 100644
index 0000000..0ff9739
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_sin.cl
@@ -0,0 +1,25 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/clc_convert.h>
+#include <clc/clcmacro.h>
+#include <clc/float/definitions.h>
+#include <clc/internal/clc.h>
+#include <clc/math/clc_fabs.h>
+#include <clc/math/clc_sincos_helpers.h>
+#include <clc/math/clc_sincos_piby4.h>
+#include <clc/math/clc_trunc.h>
+#include <clc/math/math.h>
+#include <clc/math/tables.h>
+#include <clc/relational/clc_isinf.h>
+#include <clc/relational/clc_isnan.h>
+#include <clc/relational/clc_select.h>
+#include <clc/shared/clc_max.h>
+
+#define __CLC_BODY <clc_sin.inc>
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_sin.inc b/libclc/clc/lib/generic/math/clc_sin.inc
new file mode 100644
index 0000000..b4f72eb
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_sin.inc
@@ -0,0 +1,68 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if __CLC_FPSIZE == 32
+
+_CLC_OVERLOAD _CLC_DEF __CLC_FLOATN __clc_sin(__CLC_FLOATN x) {
+ __CLC_FLOATN absx = __clc_fabs(x);
+
+ __CLC_FLOATN r0, r1;
+ __CLC_INTN regn = __clc_argReductionS(&r0, &r1, absx);
+
+ __CLC_FLOATN ss = __clc_sinf_piby4(r0, r1);
+ __CLC_FLOATN cc = __clc_cosf_piby4(r0, r1);
+
+ __CLC_FLOATN s = (regn & 1) != 0 ? cc : ss;
+ s = __CLC_AS_FLOATN(__CLC_AS_INTN(s) ^ ((regn > 1) << 31) ^
+ (__CLC_AS_INTN(x) ^ __CLC_AS_INTN(absx)));
+
+ s = __clc_select(s, __CLC_GENTYPE_NAN, __clc_isnan(x) || __clc_isinf(x));
+
+ // Subnormals
+ s = x == 0.0f ? x : s;
+
+ return s;
+}
+
+#elif __CLC_FPSIZE == 16
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_sin(__CLC_GENTYPE x) {
+ return __CLC_CONVERT_GENTYPE(__clc_sin(__CLC_CONVERT_FLOATN(x)));
+}
+
+#elif __CLC_FPSIZE == 64
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_sin(__CLC_GENTYPE x) {
+ __CLC_GENTYPE absx = __clc_fabs(x);
+
+ __CLC_BIT_INTN is_medium = absx < 0x1.0p+47;
+
+ __CLC_INTN regn_m, regn_l;
+ __CLC_GENTYPE r_m, r_l, rr_m, rr_l;
+
+ __clc_remainder_piby2_medium(absx, &r_m, &rr_m, &regn_m);
+ __clc_remainder_piby2_large(absx, &r_l, &rr_l, &regn_l);
+
+ __CLC_GENTYPE r = is_medium ? r_m : r_l;
+ __CLC_GENTYPE rr = is_medium ? rr_m : rr_l;
+ __CLC_INTN regn = __CLC_CONVERT_INTN(is_medium) ? regn_m : regn_l;
+
+ __CLC_GENTYPE sinval, cosval;
+ __clc_sincos_piby4(r, rr, &sinval, &cosval);
+
+ __CLC_LONGN s =
+ __CLC_AS_LONGN(__CLC_CONVERT_BIT_INTN((regn & 1) != 0) ? cosval : sinval);
+
+ s ^= (__CLC_CONVERT_BIT_INTN(regn > 1) << 63) ^
+ (__CLC_CONVERT_BIT_INTN(x < 0.0) << 63);
+
+ return __clc_isinf(x) | __clc_isnan(x) ? __CLC_GENTYPE_NAN
+ : __CLC_AS_GENTYPE(s);
+}
+
+#endif
diff --git a/libclc/clc/lib/generic/math/clc_sincos.cl b/libclc/clc/lib/generic/math/clc_sincos.cl
new file mode 100644
index 0000000..2209a41
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_sincos.cl
@@ -0,0 +1,14 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/internal/clc.h>
+#include <clc/math/clc_cos.h>
+#include <clc/math/clc_sin.h>
+
+#define __CLC_BODY <clc_sincos.inc>
+#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/sincos.inc b/libclc/clc/lib/generic/math/clc_sincos.inc
index b1d4f88..1e21b75 100644
--- a/libclc/generic/lib/math/sincos.inc
+++ b/libclc/clc/lib/generic/math/clc_sincos.inc
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#define __CLC_DECLARE_SINCOS(ADDRSPACE, TYPE) \
- _CLC_OVERLOAD _CLC_DEF TYPE sincos (TYPE x, ADDRSPACE TYPE * cosval) { \
- *cosval = cos(x); \
- return sin(x); \
+#define __CLC_DECLARE_SINCOS(ADDRSPACE, TYPE) \
+ _CLC_OVERLOAD _CLC_DEF TYPE __clc_sincos(TYPE x, ADDRSPACE TYPE *cosval) { \
+ *cosval = __clc_cos(x); \
+ return __clc_sin(x); \
}
__CLC_DECLARE_SINCOS(global, __CLC_GENTYPE)
diff --git a/libclc/clc/lib/generic/math/clc_sincos_helpers.cl b/libclc/clc/lib/generic/math/clc_sincos_helpers.cl
index 24676d3..c1768e3 100644
--- a/libclc/clc/lib/generic/math/clc_sincos_helpers.cl
+++ b/libclc/clc/lib/generic/math/clc_sincos_helpers.cl
@@ -31,3 +31,27 @@
#define __CLC_BODY <clc_sincos_helpers.inc>
#include <clc/math/gentype.inc>
+
+#undef __FLOAT_ONLY
+
+#ifdef cl_khr_fp64
+
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+
+#include <clc/math/clc_fract.h>
+#include <clc/math/tables.h>
+#include <clc/shared/clc_max.h>
+
+#define bytealign(src0, src1, src2) \
+ (__CLC_CONVERT_UINTN( \
+ ((__CLC_CONVERT_LONGN((src0)) << 32) | __CLC_CONVERT_LONGN((src1))) >> \
+ (((src2) & 3) * 8)))
+
+#define __DOUBLE_ONLY
+#define __CLC_BODY <clc_sincos_helpers_fp64.inc>
+
+#include <clc/math/gentype.inc>
+
+#undef __DOUBLE_ONLY
+
+#endif
diff --git a/libclc/clc/lib/generic/math/clc_sincos_helpers.inc b/libclc/clc/lib/generic/math/clc_sincos_helpers.inc
index 516a40c..29c7421 100644
--- a/libclc/clc/lib/generic/math/clc_sincos_helpers.inc
+++ b/libclc/clc/lib/generic/math/clc_sincos_helpers.inc
@@ -90,7 +90,7 @@ _CLC_DECL _CLC_OVERLOAD __CLC_FLOATN __clc_tanf_piby4(__CLC_FLOATN x,
__CLC_FLOATN t = __clc_mad(x * r, __clc_native_divide(a, b), x);
__CLC_FLOATN tr = -MATH_RECIP(t);
- return regn & 1 ? tr : t;
+ return (regn & 1) != 0 ? tr : t;
}
_CLC_DEF _CLC_OVERLOAD void __clc_fullMulS(private __CLC_FLOATN *hi,
diff --git a/libclc/clc/lib/generic/math/clc_sincos_helpers_fp64.inc b/libclc/clc/lib/generic/math/clc_sincos_helpers_fp64.inc
new file mode 100644
index 0000000..9b5776d
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_sincos_helpers_fp64.inc
@@ -0,0 +1,235 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Reduction for medium sized arguments
+_CLC_DEF _CLC_OVERLOAD void
+__clc_remainder_piby2_medium(__CLC_DOUBLEN x, private __CLC_DOUBLEN *r,
+ private __CLC_DOUBLEN *rr,
+ private __CLC_INTN *regn) {
+ // How many pi/2 is x a multiple of?
+ const __CLC_DOUBLEN two_by_pi = 0x1.45f306dc9c883p-1;
+ __CLC_DOUBLEN dnpi2 = __clc_trunc(__clc_fma(x, two_by_pi, 0.5));
+
+ const __CLC_DOUBLEN piby2_h = -7074237752028440.0 / 0x1.0p+52;
+ const __CLC_DOUBLEN piby2_m = -2483878800010755.0 / 0x1.0p+105;
+ const __CLC_DOUBLEN piby2_t = -3956492004828932.0 / 0x1.0p+158;
+
+ // Compute product of npi2 with 159 bits of 2/pi
+ __CLC_DOUBLEN p_hh = piby2_h * dnpi2;
+ __CLC_DOUBLEN p_ht = __clc_fma(piby2_h, dnpi2, -p_hh);
+ __CLC_DOUBLEN p_mh = piby2_m * dnpi2;
+ __CLC_DOUBLEN p_mt = __clc_fma(piby2_m, dnpi2, -p_mh);
+ __CLC_DOUBLEN p_th = piby2_t * dnpi2;
+ __CLC_DOUBLEN p_tt = __clc_fma(piby2_t, dnpi2, -p_th);
+
+ // Reduce to 159 bits
+ __CLC_DOUBLEN ph = p_hh;
+ __CLC_DOUBLEN pm = p_ht + p_mh;
+ __CLC_DOUBLEN t = p_mh - (pm - p_ht);
+ __CLC_DOUBLEN pt = p_th + t + p_mt + p_tt;
+ t = ph + pm;
+ pm = pm - (t - ph);
+ ph = t;
+ t = pm + pt;
+ pt = pt - (t - pm);
+ pm = t;
+
+ // Subtract from x
+ t = x + ph;
+ __CLC_DOUBLEN qh = t + pm;
+ __CLC_DOUBLEN qt = pm - (qh - t) + pt;
+
+ *r = qh;
+ *rr = qt;
+ *regn = __CLC_CONVERT_INTN(__CLC_CONVERT_LONGN(dnpi2)) & 0x3;
+}
+
+// Given positive argument x, reduce it to the range [-pi/4,pi/4] using
+// extra precision, and return the result in r, rr.
+// Return value "regn" tells how many lots of pi/2 were subtracted
+// from x to put it in the range [-pi/4,pi/4], mod 4.
+_CLC_DEF _CLC_OVERLOAD void
+__clc_remainder_piby2_large(__CLC_DOUBLEN x, private __CLC_DOUBLEN *r,
+ private __CLC_DOUBLEN *rr,
+ private __CLC_INTN *regn) {
+
+ __CLC_LONGN ux = __CLC_AS_LONGN(x);
+ __CLC_INTN e = __CLC_CONVERT_INTN(ux >> EXPSHIFTBITS_DP64) - EXPBIAS_DP64;
+ __CLC_INTN i = __clc_max(23, (e >> 3) + 17);
+ __CLC_INTN j = 150 - i;
+ __CLC_INTN j16 = j & ~0xf;
+ __CLC_DOUBLEN fract_temp;
+
+ // The following extracts 192 consecutive bits of 2/pi aligned on an arbitrary
+ // byte boundary
+ __CLC_ULONGN q0 = USE_TABLE(pibits_tbl, j16);
+ __CLC_ULONGN q1 = USE_TABLE(pibits_tbl, (j16 + 8));
+ __CLC_ULONGN q2 = USE_TABLE(pibits_tbl, (j16 + 16));
+ __CLC_ULONGN q3 = USE_TABLE(pibits_tbl, (j16 + 24));
+ __CLC_ULONGN q4 = USE_TABLE(pibits_tbl, (j16 + 32));
+
+ __CLC_UINTN q0s0 = __CLC_CONVERT_UINTN(q0);
+ __CLC_UINTN q0s1 = __CLC_CONVERT_UINTN(q0 >> 32);
+
+ __CLC_UINTN q1s0 = __CLC_CONVERT_UINTN(q1);
+ __CLC_UINTN q1s1 = __CLC_CONVERT_UINTN(q1 >> 32);
+
+ __CLC_UINTN q2s0 = __CLC_CONVERT_UINTN(q2);
+ __CLC_UINTN q2s1 = __CLC_CONVERT_UINTN(q2 >> 32);
+
+ __CLC_UINTN q3s0 = __CLC_CONVERT_UINTN(q3);
+ __CLC_UINTN q3s1 = __CLC_CONVERT_UINTN(q3 >> 32);
+
+ __CLC_UINTN q4s0 = __CLC_CONVERT_UINTN(q4);
+ __CLC_UINTN q4s1 = __CLC_CONVERT_UINTN(q4 >> 32);
+
+ __CLC_INTN k = (j >> 2) & 0x3;
+ __CLC_INTN c0 = k == 0;
+ __CLC_INTN c1 = k == 1;
+ __CLC_INTN c2 = k == 2;
+ __CLC_INTN c3 = k == 3;
+
+ __CLC_UINTN u0, u1, u2, u3, u4, u5, u6;
+
+ u0 = c1 ? q0s1 : q0s0;
+ u0 = c2 ? q1s0 : u0;
+ u0 = c3 ? q1s1 : u0;
+
+ u1 = c1 ? q1s0 : q0s1;
+ u1 = c2 ? q1s1 : u1;
+ u1 = c3 ? q2s0 : u1;
+
+ u2 = c1 ? q1s1 : q1s0;
+ u2 = c2 ? q2s0 : u2;
+ u2 = c3 ? q2s1 : u2;
+
+ u3 = c1 ? q2s0 : q1s1;
+ u3 = c2 ? q2s1 : u3;
+ u3 = c3 ? q3s0 : u3;
+
+ u4 = c1 ? q2s1 : q2s0;
+ u4 = c2 ? q3s0 : u4;
+ u4 = c3 ? q3s1 : u4;
+
+ u5 = c1 ? q3s0 : q2s1;
+ u5 = c2 ? q3s1 : u5;
+ u5 = c3 ? q4s0 : u5;
+
+ u6 = c1 ? q3s1 : q3s0;
+ u6 = c2 ? q4s0 : u6;
+ u6 = c3 ? q4s1 : u6;
+
+ __CLC_UINTN v0 = bytealign(u1, u0, j);
+ __CLC_UINTN v1 = bytealign(u2, u1, j);
+ __CLC_UINTN v2 = bytealign(u3, u2, j);
+ __CLC_UINTN v3 = bytealign(u4, u3, j);
+ __CLC_UINTN v4 = bytealign(u5, u4, j);
+ __CLC_UINTN v5 = bytealign(u6, u5, j);
+
+ // Place those 192 bits in 4 48-bit doubles along with correct exponent
+ // If i > 1018 we would get subnormals so we scale p up and x down to get the
+ // same product
+ i = 2 + 8 * i;
+ x *= __CLC_CONVERT_BIT_INTN(i > 1018) ? 0x1.0p-136 : 1.0;
+ i -= i > 1018 ? 136 : 0;
+
+#define doublen_lohi(x, y) \
+ __CLC_AS_DOUBLEN(__CLC_CONVERT_ULONGN((x)) & 0xFFFFFFFF | \
+ __CLC_CONVERT_ULONGN((y)) << 32)
+
+ __CLC_UINTN ua = __CLC_CONVERT_UINTN(EXPBIAS_DP64 + EXPSHIFTBITS_DP64 - i)
+ << 20;
+ __CLC_DOUBLEN a = doublen_lohi((__CLC_ULONGN)0, ua);
+ __CLC_DOUBLEN p0 = doublen_lohi(v0, ua | (v1 & 0xffffU)) - a;
+ ua += 0x03000000U;
+ a = doublen_lohi((__CLC_ULONGN)0, ua);
+ __CLC_DOUBLEN p1 =
+ doublen_lohi(((v2 << 16) | (v1 >> 16)), (ua | (v2 >> 16))) - a;
+ ua += 0x03000000U;
+ a = doublen_lohi((__CLC_ULONGN)0, ua);
+ __CLC_DOUBLEN p2 = doublen_lohi(v3, (ua | (v4 & 0xffffU))) - a;
+ ua += 0x03000000U;
+ a = doublen_lohi((__CLC_ULONGN)0, ua);
+ __CLC_DOUBLEN p3 =
+ doublen_lohi(((v5 << 16) | (v4 >> 16)), (ua | (v5 >> 16))) - a;
+
+#undef doublen_lohi
+
+ // Exact multiply
+ __CLC_DOUBLEN f0h = p0 * x;
+ __CLC_DOUBLEN f0l = __clc_fma(p0, x, -f0h);
+ __CLC_DOUBLEN f1h = p1 * x;
+ __CLC_DOUBLEN f1l = __clc_fma(p1, x, -f1h);
+ __CLC_DOUBLEN f2h = p2 * x;
+ __CLC_DOUBLEN f2l = __clc_fma(p2, x, -f2h);
+ __CLC_DOUBLEN f3h = p3 * x;
+ __CLC_DOUBLEN f3l = __clc_fma(p3, x, -f3h);
+
+ // Accumulate product into 4 doubles
+ __CLC_DOUBLEN s, t;
+
+ __CLC_DOUBLEN f3 = f3h + f2h;
+ t = f2h - (f3 - f3h);
+ s = f3l + t;
+ t = t - (s - f3l);
+
+ __CLC_DOUBLEN f2 = s + f1h;
+ t = f1h - (f2 - s) + t;
+ s = f2l + t;
+ t = t - (s - f2l);
+
+ __CLC_DOUBLEN f1 = s + f0h;
+ t = f0h - (f1 - s) + t;
+ s = f1l + t;
+
+ __CLC_DOUBLEN f0 = s + f0l;
+
+ // Strip off unwanted large integer bits
+ f3 = 0x1.0p+10 * __clc_fract(f3 * 0x1.0p-10, &fract_temp);
+ f3 += f3 + f2 < 0.0 ? 0x1.0p+10 : 0.0;
+
+ // Compute least significant integer bits
+ t = f3 + f2;
+ __CLC_DOUBLEN di = t - __clc_fract(t, &fract_temp);
+ i = __CLC_CONVERT_INTN(__CLC_CONVERT_FLOATN(di));
+
+ // Shift out remaining integer part
+ f3 -= di;
+ s = f3 + f2;
+ t = f2 - (s - f3);
+ f3 = s;
+ f2 = t;
+ s = f2 + f1;
+ t = f1 - (s - f2);
+ f2 = s;
+ f1 = t;
+ f1 += f0;
+
+ // Subtract 1 if fraction is >= 0.5, and update regn
+ __CLC_INTN g = __CLC_CONVERT_INTN(f3 >= 0.5 ? 1L : 0L);
+ i += g;
+ f3 -= __CLC_CONVERT_DOUBLEN(__CLC_CONVERT_FLOATN(g));
+
+ // Shift up bits
+ s = f3 + f2;
+ t = f2 - (s - f3);
+ f3 = s;
+ f2 = t + f1;
+
+ // Multiply precise fraction by pi/2 to get radians
+ const __CLC_DOUBLEN p2h = 7074237752028440.0 / 0x1.0p+52;
+ const __CLC_DOUBLEN p2t = 4967757600021510.0 / 0x1.0p+106;
+
+ __CLC_DOUBLEN rhi = f3 * p2h;
+ __CLC_DOUBLEN rlo =
+ __clc_fma(f2, p2h, __clc_fma(f3, p2t, __clc_fma(f3, p2h, -rhi)));
+
+ *r = rhi + rlo;
+ *rr = rlo - (*r - rhi);
+ *regn = i & 0x3;
+}
diff --git a/libclc/clc/lib/generic/math/clc_tables.cl b/libclc/clc/lib/generic/math/clc_tables.cl
index 7ee7445..7db0053 100644
--- a/libclc/clc/lib/generic/math/clc_tables.cl
+++ b/libclc/clc/lib/generic/math/clc_tables.cl
@@ -1648,4 +1648,64 @@ CLC_TABLE_FUNCTION(double, SINH_TBL_TAIL, sinh_tbl_tail);
CLC_TABLE_FUNCTION(double, COSH_TBL_HEAD, cosh_tbl_head);
CLC_TABLE_FUNCTION(double, COSH_TBL_TAIL, cosh_tbl_tail);
+DECLARE_TABLE(uchar, PIBITS_TBL, ) = {
+ 224, 241, 27, 193, 12, 88, 33, 116, 53, 126, 196, 126, 237, 175, 169,
+ 75, 74, 41, 222, 231, 28, 244, 236, 197, 151, 175, 31, 235, 158, 212,
+ 181, 168, 127, 121, 154, 253, 24, 61, 221, 38, 44, 159, 60, 251, 217,
+ 180, 125, 180, 41, 104, 45, 70, 188, 188, 63, 96, 22, 120, 255, 95,
+ 226, 127, 236, 160, 228, 247, 46, 126, 17, 114, 210, 231, 76, 13, 230,
+ 88, 71, 230, 4, 249, 125, 209, 154, 192, 113, 166, 19, 18, 237, 186,
+ 212, 215, 8, 162, 251, 156, 166, 196, 114, 172, 119, 248, 115, 72, 70,
+ 39, 168, 187, 36, 25, 128, 75, 55, 9, 233, 184, 145, 220, 134, 21,
+ 239, 122, 175, 142, 69, 249, 7, 65, 14, 241, 100, 86, 138, 109, 3,
+ 119, 211, 212, 71, 95, 157, 240, 167, 84, 16, 57, 185, 13, 230, 139,
+ 2, 0, 0, 0, 0, 0, 0, 0};
+
+_CLC_DEF _CLC_OVERLOAD ulong TABLE_MANGLE(pibits_tbl)(int idx) {
+ return *(__constant ulong *)(PIBITS_TBL + idx);
+}
+_CLC_DEF _CLC_OVERLOAD ulong2 TABLE_MANGLE(pibits_tbl)(int2 idx) {
+ return (ulong2){*(__constant ulong *)(PIBITS_TBL + idx.s0),
+ *(__constant ulong *)(PIBITS_TBL + idx.s1)};
+}
+_CLC_DEF _CLC_OVERLOAD ulong3 TABLE_MANGLE(pibits_tbl)(int3 idx) {
+ return (ulong3){*(__constant ulong *)(PIBITS_TBL + idx.s0),
+ *(__constant ulong *)(PIBITS_TBL + idx.s1),
+ *(__constant ulong *)(PIBITS_TBL + idx.s2)};
+}
+_CLC_DEF _CLC_OVERLOAD ulong4 TABLE_MANGLE(pibits_tbl)(int4 idx) {
+ return (ulong4){*(__constant ulong *)(PIBITS_TBL + idx.s0),
+ *(__constant ulong *)(PIBITS_TBL + idx.s1),
+ *(__constant ulong *)(PIBITS_TBL + idx.s2),
+ *(__constant ulong *)(PIBITS_TBL + idx.s3)};
+}
+_CLC_DEF _CLC_OVERLOAD ulong8 TABLE_MANGLE(pibits_tbl)(int8 idx) {
+ return (ulong8){*(__constant ulong *)(PIBITS_TBL + idx.s0),
+ *(__constant ulong *)(PIBITS_TBL + idx.s1),
+ *(__constant ulong *)(PIBITS_TBL + idx.s2),
+ *(__constant ulong *)(PIBITS_TBL + idx.s3),
+ *(__constant ulong *)(PIBITS_TBL + idx.s4),
+ *(__constant ulong *)(PIBITS_TBL + idx.s5),
+ *(__constant ulong *)(PIBITS_TBL + idx.s6),
+ *(__constant ulong *)(PIBITS_TBL + idx.s7)};
+}
+_CLC_DEF _CLC_OVERLOAD ulong16 TABLE_MANGLE(pibits_tbl)(int16 idx) {
+ return (ulong16){*(__constant ulong *)(PIBITS_TBL + idx.s0),
+ *(__constant ulong *)(PIBITS_TBL + idx.s1),
+ *(__constant ulong *)(PIBITS_TBL + idx.s2),
+ *(__constant ulong *)(PIBITS_TBL + idx.s3),
+ *(__constant ulong *)(PIBITS_TBL + idx.s4),
+ *(__constant ulong *)(PIBITS_TBL + idx.s5),
+ *(__constant ulong *)(PIBITS_TBL + idx.s6),
+ *(__constant ulong *)(PIBITS_TBL + idx.s7),
+ *(__constant ulong *)(PIBITS_TBL + idx.s8),
+ *(__constant ulong *)(PIBITS_TBL + idx.s9),
+ *(__constant ulong *)(PIBITS_TBL + idx.sA),
+ *(__constant ulong *)(PIBITS_TBL + idx.sB),
+ *(__constant ulong *)(PIBITS_TBL + idx.sC),
+ *(__constant ulong *)(PIBITS_TBL + idx.sD),
+ *(__constant ulong *)(PIBITS_TBL + idx.sE),
+ *(__constant ulong *)(PIBITS_TBL + idx.sF)};
+}
+
#endif // cl_khr_fp64
diff --git a/libclc/clc/lib/generic/math/clc_tan.cl b/libclc/clc/lib/generic/math/clc_tan.cl
new file mode 100644
index 0000000..adf42c4
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_tan.cl
@@ -0,0 +1,22 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/clc_convert.h>
+#include <clc/float/definitions.h>
+#include <clc/internal/clc.h>
+#include <clc/math/clc_fabs.h>
+#include <clc/math/clc_sincos_helpers.h>
+#include <clc/math/clc_sincos_piby4.h>
+#include <clc/math/math.h>
+#include <clc/math/tables.h>
+#include <clc/relational/clc_isinf.h>
+#include <clc/relational/clc_isnan.h>
+#include <clc/relational/clc_select.h>
+
+#define __CLC_BODY <clc_tan.inc>
+#include <clc/math/gentype.inc>
diff --git a/libclc/clc/lib/generic/math/clc_tan.inc b/libclc/clc/lib/generic/math/clc_tan.inc
new file mode 100644
index 0000000..8a318a5
--- /dev/null
+++ b/libclc/clc/lib/generic/math/clc_tan.inc
@@ -0,0 +1,61 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if __CLC_FPSIZE == 32
+
+_CLC_DEF _CLC_OVERLOAD __CLC_GENTYPE __clc_tan(__CLC_GENTYPE x) {
+ __CLC_GENTYPE absx = __clc_fabs(x);
+ __CLC_UINTN x_signbit = __CLC_AS_UINTN(x) & SIGNBIT_SP32;
+
+ __CLC_GENTYPE r0, r1;
+ __CLC_INTN regn = __clc_argReductionS(&r0, &r1, absx);
+
+ __CLC_GENTYPE t = __clc_tanf_piby4(r0 + r1, regn);
+ t = __CLC_AS_GENTYPE(__CLC_AS_UINTN(t) ^ x_signbit);
+
+ t = __clc_select(t, __CLC_GENTYPE_NAN, __clc_isnan(x) || __clc_isinf(x));
+ // Take care of subnormals
+ t = (x == 0.0f) ? x : t;
+ return t;
+}
+
+#elif __CLC_FPSIZE == 64
+
+_CLC_DEF _CLC_OVERLOAD __CLC_GENTYPE __clc_tan(__CLC_GENTYPE x) {
+ __CLC_GENTYPE y = __clc_fabs(x);
+
+ __CLC_BIT_INTN is_medium = y < 0x1.0p+30;
+
+ __CLC_INTN regn_m, regn_l;
+ __CLC_GENTYPE r_m, r_l, rr_m, rr_l;
+
+ __clc_remainder_piby2_medium(y, &r_m, &rr_m, &regn_m);
+ __clc_remainder_piby2_large(y, &r_l, &rr_l, &regn_l);
+
+ __CLC_GENTYPE r = is_medium ? r_m : r_l;
+ __CLC_GENTYPE rr = is_medium ? rr_m : rr_l;
+ __CLC_INTN regn = __CLC_CONVERT_INTN(is_medium) ? regn_m : regn_l;
+
+ __CLC_GENTYPE lead, tail;
+ __clc_tan_piby4(r, rr, &lead, &tail);
+
+ __CLC_LONGN t =
+ __CLC_AS_LONGN(__CLC_CONVERT_BIT_INTN((regn & 1) != 0) ? tail : lead);
+ t ^= __CLC_CONVERT_BIT_INTN(x < 0.0) << 63;
+
+ return __clc_isnan(x) || __clc_isinf(x) ? __CLC_GENTYPE_NAN
+ : __CLC_AS_GENTYPE(t);
+}
+
+#elif __CLC_FPSIZE == 16
+
+_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __clc_tan(__CLC_GENTYPE x) {
+ return __CLC_CONVERT_GENTYPE(__clc_tan(__CLC_CONVERT_FLOATN(x)));
+}
+
+#endif
diff --git a/libclc/clspv/lib/SOURCES b/libclc/clspv/lib/SOURCES
index d2fea9d..d7cd36d 100644
--- a/libclc/clspv/lib/SOURCES
+++ b/libclc/clspv/lib/SOURCES
@@ -16,7 +16,6 @@ subnormal_config.cl
../../generic/lib/math/atanh.cl
../../generic/lib/math/atanpi.cl
../../generic/lib/math/cbrt.cl
-../../generic/lib/math/clc_tan.cl
../../generic/lib/math/cos.cl
../../generic/lib/math/cosh.cl
../../generic/lib/math/cospi.cl
@@ -66,10 +65,8 @@ subnormal_config.cl
../../generic/lib/math/rootn.cl
../../generic/lib/math/sin.cl
../../generic/lib/math/sincos.cl
-../../generic/lib/math/sincos_helpers.cl
../../generic/lib/math/sinh.cl
../../generic/lib/math/sinpi.cl
-../../generic/lib/math/tables.cl
../../generic/lib/math/tan.cl
../../generic/lib/math/tanh.cl
../../generic/lib/math/tanpi.cl
diff --git a/libclc/generic/include/clc/math/sincos.h b/libclc/generic/include/clc/math/sincos.h
index fbd29c4..7426c23 100644
--- a/libclc/generic/include/clc/math/sincos.h
+++ b/libclc/generic/include/clc/math/sincos.h
@@ -6,5 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#define __CLC_BODY <clc/math/sincos.inc>
+#define __CLC_BODY <clc/math/unary_decl_with_ptr.inc>
+#define __CLC_FUNCTION __clc_sincos
#include <clc/math/gentype.inc>
+#undef __CLC_FUNCTION
diff --git a/libclc/generic/include/clc/math/sincos.inc b/libclc/generic/include/clc/math/sincos.inc
deleted file mode 100644
index d6ec2fe..0000000
--- a/libclc/generic/include/clc/math/sincos.inc
+++ /dev/null
@@ -1,14 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE sincos(__CLC_GENTYPE x,
- global __CLC_GENTYPE *cosval);
-_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE sincos(__CLC_GENTYPE x,
- local __CLC_GENTYPE *cosval);
-_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE sincos(__CLC_GENTYPE x,
- private __CLC_GENTYPE *cosval);
diff --git a/libclc/generic/lib/SOURCES b/libclc/generic/lib/SOURCES
index 781aaf1..46ce6d6 100644
--- a/libclc/generic/lib/SOURCES
+++ b/libclc/generic/lib/SOURCES
@@ -131,7 +131,6 @@ math/native_rsqrt.cl
math/native_sin.cl
math/native_sqrt.cl
math/native_tan.cl
-math/tables.cl
math/nextafter.cl
math/pow.cl
math/pown.cl
@@ -144,11 +143,9 @@ math/round.cl
math/rsqrt.cl
math/sin.cl
math/sincos.cl
-math/sincos_helpers.cl
math/sinh.cl
math/sinpi.cl
math/sqrt.cl
-math/clc_tan.cl
math/tan.cl
math/tanh.cl
math/tanpi.cl
diff --git a/libclc/generic/lib/math/clc_sw_unary.inc b/libclc/generic/lib/math/clc_sw_unary.inc
deleted file mode 100644
index 6fa051d..0000000
--- a/libclc/generic/lib/math/clc_sw_unary.inc
+++ /dev/null
@@ -1,30 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/utils.h>
-
-#define __CLC_SW_FUNC(x) __CLC_CONCAT(__clc_, x)
-
-#if __CLC_FPSIZE > 16
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_FUNC(__CLC_GENTYPE x) {
- return __CLC_SW_FUNC(__CLC_FUNC)(x);
-}
-#elif __CLC_FPSIZE == 16
-#ifdef __CLC_SCALAR
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_FUNC(__CLC_GENTYPE x) {
- return convert_half(__CLC_SW_FUNC(__CLC_FUNC)(convert_float(x)));
-}
-#else
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_FUNC(__CLC_GENTYPE x) {
- return __CLC_XCONCAT(convert_half, __CLC_VECSIZE)(__CLC_SW_FUNC(__CLC_FUNC)(
- __CLC_XCONCAT(convert_float, __CLC_VECSIZE)(x)));
-}
-#endif
-#endif
-
-#undef __CLC_SW_FUNC
diff --git a/libclc/generic/lib/math/clc_tan.cl b/libclc/generic/lib/math/clc_tan.cl
deleted file mode 100644
index 7e28e9f..0000000
--- a/libclc/generic/lib/math/clc_tan.cl
+++ /dev/null
@@ -1,62 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "sincos_helpers.h"
-#include <clc/clc.h>
-#include <clc/clcmacro.h>
-#include <clc/math/clc_fabs.h>
-#include <clc/math/clc_sincos_helpers.h>
-#include <clc/math/math.h>
-#include <clc/math/tables.h>
-#include <clc/relational/clc_isinf.h>
-#include <clc/relational/clc_isnan.h>
-
-_CLC_DEF _CLC_OVERLOAD float __clc_tan(float x) {
- int ix = as_int(x);
- int ax = ix & 0x7fffffff;
- float dx = as_float(ax);
-
- float r0, r1;
- int regn = __clc_argReductionS(&r0, &r1, dx);
-
- float t = __clc_tanf_piby4(r0 + r1, regn);
- t = as_float(as_int(t) ^ (ix ^ ax));
-
- t = ax >= PINFBITPATT_SP32 ? as_float(QNANBITPATT_SP32) : t;
- // Take care of subnormals
- t = (x == 0.0f) ? x : t;
- return t;
-}
-_CLC_UNARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, float, __clc_tan, float);
-
-#ifdef cl_khr_fp64
-#include <clc/math/clc_sincos_piby4.h>
-
-_CLC_DEF _CLC_OVERLOAD double __clc_tan(double x) {
- double y = __clc_fabs(x);
-
- double r, rr;
- int regn;
-
- if (y < 0x1.0p+30)
- __clc_remainder_piby2_medium(y, &r, &rr, &regn);
- else
- __clc_remainder_piby2_large(y, &r, &rr, &regn);
-
- double lead, tail;
- __clc_tan_piby4(r, rr, &lead, &tail);
-
- int2 t = as_int2(regn & 1 ? tail : lead);
- t.hi ^= (x < 0.0) << 31;
-
- return __clc_isnan(x) || __clc_isinf(x) ? as_double(QNANBITPATT_DP64)
- : as_double(t);
-}
-_CLC_UNARY_VECTORIZE(_CLC_DEF _CLC_OVERLOAD, double, __clc_tan, double);
-
-#endif
diff --git a/libclc/generic/lib/math/cos.cl b/libclc/generic/lib/math/cos.cl
index 00ffa37..5b97c6a 100644
--- a/libclc/generic/lib/math/cos.cl
+++ b/libclc/generic/lib/math/cos.cl
@@ -6,45 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include "sincos_helpers.h"
#include <clc/clc.h>
-#include <clc/clc_convert.h>
-#include <clc/clcmacro.h>
-#include <clc/math/clc_fabs.h>
-#include <clc/math/clc_sincos_helpers.h>
-#include <clc/math/math.h>
-#include <clc/relational/clc_isinf.h>
-#include <clc/relational/clc_isnan.h>
-#include <clc/relational/clc_select.h>
+#include <clc/math/clc_cos.h>
-// FP32 and FP16 versions.
-#define __CLC_BODY <cos.inc>
+#define FUNCTION cos
+#define __CLC_BODY <clc/shared/unary_def.inc>
#include <clc/math/gentype.inc>
-
-#ifdef cl_khr_fp64
-
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-
-_CLC_OVERLOAD _CLC_DEF double cos(double x) {
- x = fabs(x);
-
- double r, rr;
- int regn;
-
- if (x < 0x1.0p+47)
- __clc_remainder_piby2_medium(x, &r, &rr, &regn);
- else
- __clc_remainder_piby2_large(x, &r, &rr, &regn);
-
- double2 sc = __clc_sincos_piby4(r, rr);
- sc.lo = -sc.lo;
-
- int2 c = as_int2(regn & 1 ? sc.lo : sc.hi);
- c.hi ^= (regn > 1) << 31;
-
- return isnan(x) | isinf(x) ? as_double(QNANBITPATT_DP64) : as_double(c);
-}
-
-_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, cos, double);
-
-#endif
diff --git a/libclc/generic/lib/math/cos.inc b/libclc/generic/lib/math/cos.inc
deleted file mode 100644
index 1db9671..0000000
--- a/libclc/generic/lib/math/cos.inc
+++ /dev/null
@@ -1,34 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#if __CLC_FPSIZE == 32
-
-_CLC_OVERLOAD _CLC_DEF __CLC_FLOATN cos(__CLC_FLOATN x) {
- __CLC_FLOATN absx = __clc_fabs(x);
-
- __CLC_FLOATN r0, r1;
- __CLC_INTN regn = __clc_argReductionS(&r0, &r1, absx);
-
- __CLC_FLOATN ss = -__clc_sinf_piby4(r0, r1);
- __CLC_FLOATN cc = __clc_cosf_piby4(r0, r1);
-
- __CLC_FLOATN c = (regn & 1) != 0 ? ss : cc;
- c = __CLC_AS_FLOATN(__CLC_AS_INTN(c) ^ ((regn > 1) << 31));
-
- c = __clc_select(c, __CLC_GENTYPE_NAN, __clc_isnan(x) || __clc_isinf(x));
-
- return c;
-}
-
-#elif __CLC_FPSIZE == 16
-
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE cos(__CLC_GENTYPE x) {
- return __CLC_CONVERT_GENTYPE(cos(__CLC_CONVERT_FLOATN(x)));
-}
-
-#endif
diff --git a/libclc/generic/lib/math/half_cos.cl b/libclc/generic/lib/math/half_cos.cl
index 2120aa0..1e49ebe 100644
--- a/libclc/generic/lib/math/half_cos.cl
+++ b/libclc/generic/lib/math/half_cos.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_cos.h>
-#define __CLC_FUNC cos
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_cos
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_divide.cl b/libclc/generic/lib/math/half_divide.cl
index 945e212..29b5679 100644
--- a/libclc/generic/lib/math/half_divide.cl
+++ b/libclc/generic/lib/math/half_divide.cl
@@ -7,11 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_divide.h>
-#define divide(x,y) (x/y)
-
-#define __CLC_FUNC divide
-#define __CLC_BODY <half_binary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_divide
+#define __CLC_BODY <clc/shared/binary_def.inc>
+
#include <clc/math/gentype.inc>
-#undef divide
diff --git a/libclc/generic/lib/math/half_exp.cl b/libclc/generic/lib/math/half_exp.cl
index 1f5b0b3..219b262 100644
--- a/libclc/generic/lib/math/half_exp.cl
+++ b/libclc/generic/lib/math/half_exp.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_exp.h>
-#define __CLC_FUNC exp
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_exp
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_exp10.cl b/libclc/generic/lib/math/half_exp10.cl
index 83e4535..64132c0 100644
--- a/libclc/generic/lib/math/half_exp10.cl
+++ b/libclc/generic/lib/math/half_exp10.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_exp10.h>
-#define __CLC_FUNC exp10
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_exp10
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_exp2.cl b/libclc/generic/lib/math/half_exp2.cl
index 229b007..9fddc5e 100644
--- a/libclc/generic/lib/math/half_exp2.cl
+++ b/libclc/generic/lib/math/half_exp2.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_exp2.h>
-#define __CLC_FUNC exp2
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_exp2
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_log.cl b/libclc/generic/lib/math/half_log.cl
index 3df6173..b3d2c27 100644
--- a/libclc/generic/lib/math/half_log.cl
+++ b/libclc/generic/lib/math/half_log.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_log.h>
-#define __CLC_FUNC log
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_log
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_log10.cl b/libclc/generic/lib/math/half_log10.cl
index 84d46fa..8ef6d46 100644
--- a/libclc/generic/lib/math/half_log10.cl
+++ b/libclc/generic/lib/math/half_log10.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_log10.h>
-#define __CLC_FUNC log10
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_log10
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_log2.cl b/libclc/generic/lib/math/half_log2.cl
index a46d258..a343dba 100644
--- a/libclc/generic/lib/math/half_log2.cl
+++ b/libclc/generic/lib/math/half_log2.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_log2.h>
-#define __CLC_FUNC log2
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_log2
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_powr.cl b/libclc/generic/lib/math/half_powr.cl
index 765dd58..03c4ca7 100644
--- a/libclc/generic/lib/math/half_powr.cl
+++ b/libclc/generic/lib/math/half_powr.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_powr.h>
-#define __CLC_FUNC powr
-#define __CLC_BODY <half_binary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_powr
+#define __CLC_BODY <clc/shared/binary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_recip.cl b/libclc/generic/lib/math/half_recip.cl
index 3127380..70f7625 100644
--- a/libclc/generic/lib/math/half_recip.cl
+++ b/libclc/generic/lib/math/half_recip.cl
@@ -7,12 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_recip.h>
-#define recip(x) (1.0f/x)
-
-#define __CLC_FUNC recip
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
-#include <clc/math/gentype.inc>
+#define FUNCTION half_recip
+#define __CLC_BODY <clc/shared/unary_def.inc>
-#undef recip
+#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_rsqrt.cl b/libclc/generic/lib/math/half_rsqrt.cl
index 71b2088..3fc788d 100644
--- a/libclc/generic/lib/math/half_rsqrt.cl
+++ b/libclc/generic/lib/math/half_rsqrt.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_rsqrt.h>
-#define __CLC_FUNC rsqrt
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_rsqrt
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_sin.cl b/libclc/generic/lib/math/half_sin.cl
index 257abcd..6fab611 100644
--- a/libclc/generic/lib/math/half_sin.cl
+++ b/libclc/generic/lib/math/half_sin.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_sin.h>
-#define __CLC_FUNC sin
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_sin
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_sqrt.cl b/libclc/generic/lib/math/half_sqrt.cl
index 70ad80c..e96e127 100644
--- a/libclc/generic/lib/math/half_sqrt.cl
+++ b/libclc/generic/lib/math/half_sqrt.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_sqrt.h>
-#define __CLC_FUNC sqrt
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_sqrt
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_tan.cl b/libclc/generic/lib/math/half_tan.cl
index 12bf779..ddfcfae 100644
--- a/libclc/generic/lib/math/half_tan.cl
+++ b/libclc/generic/lib/math/half_tan.cl
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_half_tan.h>
-#define __CLC_FUNC tan
-#define __CLC_BODY <half_unary.inc>
#define __FLOAT_ONLY
+#define FUNCTION half_tan
+#define __CLC_BODY <clc/shared/unary_def.inc>
+
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/half_unary.inc b/libclc/generic/lib/math/half_unary.inc
deleted file mode 100644
index 5fddb3c..0000000
--- a/libclc/generic/lib/math/half_unary.inc
+++ /dev/null
@@ -1,17 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/utils.h>
-
-#define __CLC_HALF_FUNC(x) __CLC_CONCAT(half_, x)
-
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_HALF_FUNC(__CLC_FUNC)(__CLC_GENTYPE val) {
- return __CLC_FUNC(val);
-}
-
-#undef __CLC_HALF_FUNC
diff --git a/libclc/generic/lib/math/sin.cl b/libclc/generic/lib/math/sin.cl
index f776805..76728cf 100644
--- a/libclc/generic/lib/math/sin.cl
+++ b/libclc/generic/lib/math/sin.cl
@@ -6,44 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#include "sincos_helpers.h"
#include <clc/clc.h>
-#include <clc/clc_convert.h>
-#include <clc/clcmacro.h>
-#include <clc/math/clc_fabs.h>
-#include <clc/math/clc_sincos_helpers.h>
-#include <clc/math/math.h>
-#include <clc/relational/clc_isinf.h>
-#include <clc/relational/clc_isnan.h>
-#include <clc/relational/clc_select.h>
+#include <clc/math/clc_sin.h>
-// FP32 and FP16 versions.
-#define __CLC_BODY <sin.inc>
+#define FUNCTION sin
+#define __CLC_BODY <clc/shared/unary_def.inc>
#include <clc/math/gentype.inc>
-
-#ifdef cl_khr_fp64
-
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-
-_CLC_OVERLOAD _CLC_DEF double sin(double x) {
- double y = fabs(x);
-
- double r, rr;
- int regn;
-
- if (y < 0x1.0p+47)
- __clc_remainder_piby2_medium(y, &r, &rr, &regn);
- else
- __clc_remainder_piby2_large(y, &r, &rr, &regn);
-
- double2 sc = __clc_sincos_piby4(r, rr);
-
- int2 s = as_int2(regn & 1 ? sc.hi : sc.lo);
- s.hi ^= ((regn > 1) << 31) ^ ((x < 0.0) << 31);
-
- return isinf(x) | isnan(x) ? as_double(QNANBITPATT_DP64) : as_double(s);
-}
-
-_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, sin, double);
-
-#endif
diff --git a/libclc/generic/lib/math/sin.inc b/libclc/generic/lib/math/sin.inc
deleted file mode 100644
index dbc9911..0000000
--- a/libclc/generic/lib/math/sin.inc
+++ /dev/null
@@ -1,38 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#if __CLC_FPSIZE == 32
-
-_CLC_OVERLOAD _CLC_DEF __CLC_FLOATN sin(__CLC_FLOATN x) {
- __CLC_FLOATN absx = __clc_fabs(x);
-
- __CLC_FLOATN r0, r1;
- __CLC_INTN regn = __clc_argReductionS(&r0, &r1, absx);
-
- __CLC_FLOATN ss = __clc_sinf_piby4(r0, r1);
- __CLC_FLOATN cc = __clc_cosf_piby4(r0, r1);
-
- __CLC_FLOATN s = (regn & 1) != 0 ? cc : ss;
- s = __CLC_AS_FLOATN(__CLC_AS_INTN(s) ^ ((regn > 1) << 31) ^
- (__CLC_AS_INTN(x) ^ __CLC_AS_INTN(absx)));
-
- s = __clc_select(s, __CLC_GENTYPE_NAN, __clc_isnan(x) || __clc_isinf(x));
-
- // Subnormals
- s = x == 0.0f ? x : s;
-
- return s;
-}
-
-#elif __CLC_FPSIZE == 16
-
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE sin(__CLC_GENTYPE x) {
- return __CLC_CONVERT_GENTYPE(sin(__CLC_CONVERT_FLOATN(x)));
-}
-
-#endif
diff --git a/libclc/generic/lib/math/sincos.cl b/libclc/generic/lib/math/sincos.cl
index fc0e4d3..25e620c 100644
--- a/libclc/generic/lib/math/sincos.cl
+++ b/libclc/generic/lib/math/sincos.cl
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_sincos.h>
-#define __CLC_BODY <sincos.inc>
+#define FUNCTION sincos
+#define __CLC_BODY <clc/math/unary_def_with_ptr.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/generic/lib/math/sincos_helpers.cl b/libclc/generic/lib/math/sincos_helpers.cl
deleted file mode 100644
index 651cd11..0000000
--- a/libclc/generic/lib/math/sincos_helpers.cl
+++ /dev/null
@@ -1,285 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "sincos_helpers.h"
-#include <clc/clc.h>
-#include <clc/integer/clc_clz.h>
-#include <clc/integer/clc_mul_hi.h>
-#include <clc/math/clc_fma.h>
-#include <clc/math/clc_mad.h>
-#include <clc/math/clc_trunc.h>
-#include <clc/math/math.h>
-#include <clc/math/tables.h>
-#include <clc/shared/clc_max.h>
-
-#ifdef cl_khr_fp64
-
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-
-#define bytealign(src0, src1, src2) \
- ((uint)(((((long)(src0)) << 32) | (long)(src1)) >> (((src2) & 3) * 8)))
-
-// Reduction for medium sized arguments
-_CLC_DEF void __clc_remainder_piby2_medium(double x, private double *r,
- private double *rr,
- private int *regn) {
- // How many pi/2 is x a multiple of?
- const double two_by_pi = 0x1.45f306dc9c883p-1;
- double dnpi2 = __clc_trunc(__clc_fma(x, two_by_pi, 0.5));
-
- const double piby2_h = -7074237752028440.0 / 0x1.0p+52;
- const double piby2_m = -2483878800010755.0 / 0x1.0p+105;
- const double piby2_t = -3956492004828932.0 / 0x1.0p+158;
-
- // Compute product of npi2 with 159 bits of 2/pi
- double p_hh = piby2_h * dnpi2;
- double p_ht = __clc_fma(piby2_h, dnpi2, -p_hh);
- double p_mh = piby2_m * dnpi2;
- double p_mt = __clc_fma(piby2_m, dnpi2, -p_mh);
- double p_th = piby2_t * dnpi2;
- double p_tt = __clc_fma(piby2_t, dnpi2, -p_th);
-
- // Reduce to 159 bits
- double ph = p_hh;
- double pm = p_ht + p_mh;
- double t = p_mh - (pm - p_ht);
- double pt = p_th + t + p_mt + p_tt;
- t = ph + pm;
- pm = pm - (t - ph);
- ph = t;
- t = pm + pt;
- pt = pt - (t - pm);
- pm = t;
-
- // Subtract from x
- t = x + ph;
- double qh = t + pm;
- double qt = pm - (qh - t) + pt;
-
- *r = qh;
- *rr = qt;
- *regn = (int)(long)dnpi2 & 0x3;
-}
-
-// Given positive argument x, reduce it to the range [-pi/4,pi/4] using
-// extra precision, and return the result in r, rr.
-// Return value "regn" tells how many lots of pi/2 were subtracted
-// from x to put it in the range [-pi/4,pi/4], mod 4.
-
-_CLC_DEF void __clc_remainder_piby2_large(double x, private double *r,
- private double *rr,
- private int *regn) {
-
- long ux = as_long(x);
- int e = (int)(ux >> 52) - 1023;
- int i = __clc_max(23, (e >> 3) + 17);
- int j = 150 - i;
- int j16 = j & ~0xf;
- double fract_temp;
-
- // The following extracts 192 consecutive bits of 2/pi aligned on an arbitrary
- // byte boundary
- uint4 q0 = USE_TABLE(pibits_tbl, j16);
- uint4 q1 = USE_TABLE(pibits_tbl, (j16 + 16));
- uint4 q2 = USE_TABLE(pibits_tbl, (j16 + 32));
-
- int k = (j >> 2) & 0x3;
- int4 c = (int4)k == (int4)(0, 1, 2, 3);
-
- uint u0, u1, u2, u3, u4, u5, u6;
-
- u0 = c.s1 ? q0.s1 : q0.s0;
- u0 = c.s2 ? q0.s2 : u0;
- u0 = c.s3 ? q0.s3 : u0;
-
- u1 = c.s1 ? q0.s2 : q0.s1;
- u1 = c.s2 ? q0.s3 : u1;
- u1 = c.s3 ? q1.s0 : u1;
-
- u2 = c.s1 ? q0.s3 : q0.s2;
- u2 = c.s2 ? q1.s0 : u2;
- u2 = c.s3 ? q1.s1 : u2;
-
- u3 = c.s1 ? q1.s0 : q0.s3;
- u3 = c.s2 ? q1.s1 : u3;
- u3 = c.s3 ? q1.s2 : u3;
-
- u4 = c.s1 ? q1.s1 : q1.s0;
- u4 = c.s2 ? q1.s2 : u4;
- u4 = c.s3 ? q1.s3 : u4;
-
- u5 = c.s1 ? q1.s2 : q1.s1;
- u5 = c.s2 ? q1.s3 : u5;
- u5 = c.s3 ? q2.s0 : u5;
-
- u6 = c.s1 ? q1.s3 : q1.s2;
- u6 = c.s2 ? q2.s0 : u6;
- u6 = c.s3 ? q2.s1 : u6;
-
- uint v0 = bytealign(u1, u0, j);
- uint v1 = bytealign(u2, u1, j);
- uint v2 = bytealign(u3, u2, j);
- uint v3 = bytealign(u4, u3, j);
- uint v4 = bytealign(u5, u4, j);
- uint v5 = bytealign(u6, u5, j);
-
- // Place those 192 bits in 4 48-bit doubles along with correct exponent
- // If i > 1018 we would get subnormals so we scale p up and x down to get the
- // same product
- i = 2 + 8 * i;
- x *= i > 1018 ? 0x1.0p-136 : 1.0;
- i -= i > 1018 ? 136 : 0;
-
- uint ua = (uint)(1023 + 52 - i) << 20;
- double a = as_double((uint2)(0, ua));
- double p0 = as_double((uint2)(v0, ua | (v1 & 0xffffU))) - a;
- ua += 0x03000000U;
- a = as_double((uint2)(0, ua));
- double p1 = as_double((uint2)((v2 << 16) | (v1 >> 16), ua | (v2 >> 16))) - a;
- ua += 0x03000000U;
- a = as_double((uint2)(0, ua));
- double p2 = as_double((uint2)(v3, ua | (v4 & 0xffffU))) - a;
- ua += 0x03000000U;
- a = as_double((uint2)(0, ua));
- double p3 = as_double((uint2)((v5 << 16) | (v4 >> 16), ua | (v5 >> 16))) - a;
-
- // Exact multiply
- double f0h = p0 * x;
- double f0l = __clc_fma(p0, x, -f0h);
- double f1h = p1 * x;
- double f1l = __clc_fma(p1, x, -f1h);
- double f2h = p2 * x;
- double f2l = __clc_fma(p2, x, -f2h);
- double f3h = p3 * x;
- double f3l = __clc_fma(p3, x, -f3h);
-
- // Accumulate product into 4 doubles
- double s, t;
-
- double f3 = f3h + f2h;
- t = f2h - (f3 - f3h);
- s = f3l + t;
- t = t - (s - f3l);
-
- double f2 = s + f1h;
- t = f1h - (f2 - s) + t;
- s = f2l + t;
- t = t - (s - f2l);
-
- double f1 = s + f0h;
- t = f0h - (f1 - s) + t;
- s = f1l + t;
-
- double f0 = s + f0l;
-
- // Strip off unwanted large integer bits
- f3 = 0x1.0p+10 * fract(f3 * 0x1.0p-10, &fract_temp);
- f3 += f3 + f2 < 0.0 ? 0x1.0p+10 : 0.0;
-
- // Compute least significant integer bits
- t = f3 + f2;
- double di = t - fract(t, &fract_temp);
- i = (float)di;
-
- // Shift out remaining integer part
- f3 -= di;
- s = f3 + f2;
- t = f2 - (s - f3);
- f3 = s;
- f2 = t;
- s = f2 + f1;
- t = f1 - (s - f2);
- f2 = s;
- f1 = t;
- f1 += f0;
-
- // Subtract 1 if fraction is >= 0.5, and update regn
- int g = f3 >= 0.5;
- i += g;
- f3 -= (float)g;
-
- // Shift up bits
- s = f3 + f2;
- t = f2 - (s - f3);
- f3 = s;
- f2 = t + f1;
-
- // Multiply precise fraction by pi/2 to get radians
- const double p2h = 7074237752028440.0 / 0x1.0p+52;
- const double p2t = 4967757600021510.0 / 0x1.0p+106;
-
- double rhi = f3 * p2h;
- double rlo = __clc_fma(f2, p2h, __clc_fma(f3, p2t, __clc_fma(f3, p2h, -rhi)));
-
- *r = rhi + rlo;
- *rr = rlo - (*r - rhi);
- *regn = i & 0x3;
-}
-
-_CLC_DEF double2 __clc_sincos_piby4(double x, double xx) {
- // Taylor series for sin(x) is x - x^3/3! + x^5/5! - x^7/7! ...
- // = x * (1 - x^2/3! + x^4/5! - x^6/7! ...
- // = x * f(w)
- // where w = x*x and f(w) = (1 - w/3! + w^2/5! - w^3/7! ...
- // We use a minimax approximation of (f(w) - 1) / w
- // because this produces an expansion in even powers of x.
- // If xx (the tail of x) is non-zero, we add a correction
- // term g(x,xx) = (1-x*x/2)*xx to the result, where g(x,xx)
- // is an approximation to cos(x)*sin(xx) valid because
- // xx is tiny relative to x.
-
- // Taylor series for cos(x) is 1 - x^2/2! + x^4/4! - x^6/6! ...
- // = f(w)
- // where w = x*x and f(w) = (1 - w/2! + w^2/4! - w^3/6! ...
- // We use a minimax approximation of (f(w) - 1 + w/2) / (w*w)
- // because this produces an expansion in even powers of x.
- // If xx (the tail of x) is non-zero, we subtract a correction
- // term g(x,xx) = x*xx to the result, where g(x,xx)
- // is an approximation to sin(x)*sin(xx) valid because
- // xx is tiny relative to x.
-
- const double sc1 = -0.166666666666666646259241729;
- const double sc2 = 0.833333333333095043065222816e-2;
- const double sc3 = -0.19841269836761125688538679e-3;
- const double sc4 = 0.275573161037288022676895908448e-5;
- const double sc5 = -0.25051132068021699772257377197e-7;
- const double sc6 = 0.159181443044859136852668200e-9;
-
- const double cc1 = 0.41666666666666665390037e-1;
- const double cc2 = -0.13888888888887398280412e-2;
- const double cc3 = 0.248015872987670414957399e-4;
- const double cc4 = -0.275573172723441909470836e-6;
- const double cc5 = 0.208761463822329611076335e-8;
- const double cc6 = -0.113826398067944859590880e-10;
-
- double x2 = x * x;
- double x3 = x2 * x;
- double r = 0.5 * x2;
- double t = 1.0 - r;
-
- double sp = __clc_fma(
- __clc_fma(__clc_fma(__clc_fma(sc6, x2, sc5), x2, sc4), x2, sc3), x2, sc2);
-
- double cp =
- t +
- __clc_fma(__clc_fma(__clc_fma(__clc_fma(__clc_fma(__clc_fma(cc6, x2, cc5),
- x2, cc4),
- x2, cc3),
- x2, cc2),
- x2, cc1),
- x2 * x2, __clc_fma(x, xx, (1.0 - t) - r));
-
- double2 ret;
- ret.lo =
- x - __clc_fma(-x3, sc1, __clc_fma(__clc_fma(-x3, sp, 0.5 * xx), x2, -xx));
- ret.hi = cp;
-
- return ret;
-}
-
-#endif
diff --git a/libclc/generic/lib/math/sincos_helpers.h b/libclc/generic/lib/math/sincos_helpers.h
deleted file mode 100644
index 11cb93f..0000000
--- a/libclc/generic/lib/math/sincos_helpers.h
+++ /dev/null
@@ -1,24 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/clcfunc.h>
-#include <clc/clctypes.h>
-
-#ifdef cl_khr_fp64
-
-#pragma OPENCL EXTENSION cl_khr_fp64 : enable
-
-_CLC_DECL void __clc_remainder_piby2_medium(double x, private double *r,
- private double *rr,
- private int *regn);
-_CLC_DECL void __clc_remainder_piby2_large(double x, private double *r,
- private double *rr,
- private int *regn);
-_CLC_DECL double2 __clc_sincos_piby4(double x, double xx);
-
-#endif
diff --git a/libclc/generic/lib/math/tables.cl b/libclc/generic/lib/math/tables.cl
deleted file mode 100644
index 1bda480..0000000
--- a/libclc/generic/lib/math/tables.cl
+++ /dev/null
@@ -1,30 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/clc.h>
-
-#include <clc/math/tables.h>
-
-DECLARE_TABLE(uchar, PIBITS_TBL, ) = {
- 224, 241, 27, 193, 12, 88, 33, 116, 53, 126, 196, 126, 237, 175,
- 169, 75, 74, 41, 222, 231, 28, 244, 236, 197, 151, 175, 31,
- 235, 158, 212, 181, 168, 127, 121, 154, 253, 24, 61, 221, 38,
- 44, 159, 60, 251, 217, 180, 125, 180, 41, 104, 45, 70, 188,
- 188, 63, 96, 22, 120, 255, 95, 226, 127, 236, 160, 228, 247,
- 46, 126, 17, 114, 210, 231, 76, 13, 230, 88, 71, 230, 4, 249,
- 125, 209, 154, 192, 113, 166, 19, 18, 237, 186, 212, 215, 8,
- 162, 251, 156, 166, 196, 114, 172, 119, 248, 115, 72, 70, 39,
- 168, 187, 36, 25, 128, 75, 55, 9, 233, 184, 145, 220, 134, 21,
- 239, 122, 175, 142, 69, 249, 7, 65, 14, 241, 100, 86, 138, 109,
- 3, 119, 211, 212, 71, 95, 157, 240, 167, 84, 16, 57, 185, 13,
- 230, 139, 2, 0, 0, 0, 0, 0, 0, 0
-};
-
-uint4 TABLE_MANGLE(pibits_tbl)(size_t idx) {
- return *(__constant uint4 *)(PIBITS_TBL + idx);
-}
diff --git a/libclc/generic/lib/math/tan.cl b/libclc/generic/lib/math/tan.cl
index ebbaa3a..883e331 100644
--- a/libclc/generic/lib/math/tan.cl
+++ b/libclc/generic/lib/math/tan.cl
@@ -7,9 +7,8 @@
//===----------------------------------------------------------------------===//
#include <clc/clc.h>
+#include <clc/math/clc_tan.h>
-#include <math/clc_tan.h>
-
-#define __CLC_FUNC tan
-#define __CLC_BODY <clc_sw_unary.inc>
+#define FUNCTION tan
+#define __CLC_BODY <clc/shared/unary_def.inc>
#include <clc/math/gentype.inc>
diff --git a/libclc/spirv/lib/SOURCES b/libclc/spirv/lib/SOURCES
index 5446fe1..f3852eb 100644
--- a/libclc/spirv/lib/SOURCES
+++ b/libclc/spirv/lib/SOURCES
@@ -55,7 +55,6 @@ math/fma.cl
../../generic/lib/math/log2.cl
../../generic/lib/math/logb.cl
../../generic/lib/math/modf.cl
-../../generic/lib/math/tables.cl
../../generic/lib/math/pow.cl
../../generic/lib/math/pown.cl
../../generic/lib/math/powr.cl
@@ -64,10 +63,8 @@ math/fma.cl
../../generic/lib/math/rootn.cl
../../generic/lib/math/sin.cl
../../generic/lib/math/sincos.cl
-../../generic/lib/math/sincos_helpers.cl
../../generic/lib/math/sinh.cl
../../generic/lib/math/sinpi.cl
-../../generic/lib/math/clc_tan.cl
../../generic/lib/math/tan.cl
../../generic/lib/math/tanh.cl
../../generic/lib/math/tanpi.cl
diff --git a/libcxx/CMakeLists.txt b/libcxx/CMakeLists.txt
index ac5aece..dffdd7a 100644
--- a/libcxx/CMakeLists.txt
+++ b/libcxx/CMakeLists.txt
@@ -419,6 +419,7 @@ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)
if(LIBCXX_LIBDIR_SUBDIR)
string(APPEND LIBCXX_TARGET_SUBDIR /${LIBCXX_LIBDIR_SUBDIR})
endif()
+ cmake_path(NORMAL_PATH LIBCXX_TARGET_SUBDIR)
set(LIBCXX_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LIBCXX_TARGET_SUBDIR})
set(LIBCXX_GENERATED_INCLUDE_DIR "${LLVM_BINARY_DIR}/include/c++/v1")
set(LIBCXX_GENERATED_MODULE_DIR "${LLVM_BINARY_DIR}/modules/c++/v1")
diff --git a/libcxx/docs/CodingGuidelines.rst b/libcxx/docs/CodingGuidelines.rst
index c783a4f..4a601df 100644
--- a/libcxx/docs/CodingGuidelines.rst
+++ b/libcxx/docs/CodingGuidelines.rst
@@ -79,9 +79,9 @@ and then check for ``#if _LIBCPP_SOMETHING_ENABLED`` instead of
and then checking for ``#ifdef _LIBCPP_SOMETHING_ENABLED``.
-This makes it significantly easier to catch missing includes, since Clang and GCC will warn when using and undefined
-marco inside an ``#if`` statement when using ``-Wundef``. Some macros in libc++ don't use this style yet, so this only
-applies when introducing a new macro.
+This makes it significantly easier to catch missing includes: Clang and GCC with ``-Wundef`` enabled will warn
+when using an undefined macro inside an ``#if`` statement. Some macros in libc++ don't use this style yet,
+so this guideline only applies when introducing a new macro.
This is partially enforced by the clang-tidy check ``libcpp-internal-ftms``.
@@ -107,9 +107,9 @@ This is enforced by the clang-tidy check ``libcpp-uglify-attributes``.
Use C++11 extensions in C++03 code if they simplify the code
============================================================
-libc++ only supports Clang in C++98/03 mode. Clang provides many C++11 features in C++03, making it possible to write a
-lot of code in a simpler way than if we were restricted to C++03 features. Some use of extensions is even mandatory,
-since libc++ supports move semantics in C++03.
+libc++ supports the C++98/03 mode only with the Clang compiler. Clang provides many C++11 features
+in C++03, making it possible to write a lot of code in a simpler way than if we were restricted to C++03 features.
+Some use of extensions is even mandatory, since libc++ supports move semantics in C++03.
Use ``using`` aliases instead of ``typedef``
============================================
diff --git a/libcxx/docs/Hardening.rst b/libcxx/docs/Hardening.rst
index 097ad4b..1780884 100644
--- a/libcxx/docs/Hardening.rst
+++ b/libcxx/docs/Hardening.rst
@@ -32,8 +32,8 @@ modes are:
including heuristic checks that might have significant performance overhead as
well as internal library assertions. This mode should be used in
non-production environments (such as test suites, CI, or local development).
- We don’t commit to a particular level of performance in this mode and it’s
- *not* intended to be used in production.
+ We do not commit to a particular level of performance in this mode.
+ In particular, this mode is *not* intended to be used in production.
.. note::
@@ -155,13 +155,13 @@ easier to reason about the high-level semantics of a hardening mode.
in the library -- whatever the consequences are, they will happen in the user
code.
-- ``pedantic`` -- checks preconditions that are imposed by the Standard, but
- violating which happens to be benign in libc++.
+- ``pedantic`` -- checks preconditions that are imposed by the C++ standard,
+ but violating which happens to be benign in libc++.
- ``semantic-requirement`` -- checks that the given argument satisfies the
- semantic requirements imposed by the Standard. Typically, there is no simple
- way to completely prove that a semantic requirement is satisfied; thus, this
- would often be a heuristic check and it might be quite expensive.
+ semantic requirements imposed by the C++ standard. Typically, there is no
+ simple way to completely prove that a semantic requirement is satisfied;
+ thus, this would often be a heuristic check and it might be quite expensive.
- ``internal`` -- checks that internal invariants of the library hold. These
assertions don't depend on user input.
@@ -239,7 +239,7 @@ Mapping between the hardening modes and the assertion categories
.. note::
- The categories enabled by each mode are subject to change and users should not
+ The categories enabled by each mode are subject to change. Users should not
rely on the precise assertions enabled by a mode at a given point in time.
However, the library does guarantee to keep the hardening modes stable and
to fulfill the semantics documented here.
diff --git a/libcxx/docs/ImplementationDefinedBehavior.rst b/libcxx/docs/ImplementationDefinedBehavior.rst
index 1f95de7..62f715a 100644
--- a/libcxx/docs/ImplementationDefinedBehavior.rst
+++ b/libcxx/docs/ImplementationDefinedBehavior.rst
@@ -4,8 +4,8 @@
Implementation-defined behavior
===============================
-Contains the implementation details of the implementation-defined behavior in
-libc++. Implementation-defined is mandated to be documented by the Standard.
+This document contains the implementation details of the implementation-defined behavior in libc++.
+The C++ standard mandates that implementation-defined behavior is documented.
.. note:
This page is far from complete.
@@ -17,7 +17,7 @@ Implementation-defined behavior
Updating the Time Zone Database
-------------------------------
-The Standard allows implementations to automatically update the
+The C++ standard allows implementations to automatically update the
*remote time zone database*. Libc++ opts not to do that. Instead calling
- ``std::chrono::remote_version()`` will update the version information of the
@@ -32,7 +32,7 @@ give them full control over the process.
`[ostream.formatted.print]/3 <http://eel.is/c++draft/ostream.formatted.print#3>`_ A terminal capable of displaying Unicode
--------------------------------------------------------------------------------------------------------------------------
-The Standard specifies that the manner in which a stream is determined to refer
+The C++ standard specifies that the manner in which a stream is determined to refer
to a terminal capable of displaying Unicode is implementation-defined. This is
used for ``std::print`` and similar functions taking an ``ostream&`` argument.
@@ -55,9 +55,9 @@ Libc++ determines that a stream is Unicode-capable terminal by:
----------------------------------------------------------------------------------------
Most functions within the Mathematical Special Functions section contain integral indices.
-The Standard specifies the result for larger indices as implementation-defined.
+The C++ standard specifies the result for larger indices as implementation-defined.
Libc++ pursuits reasonable results by choosing the same formulas as for indices below that threshold.
-E.g.
+E.g.,
- ``std::hermite(unsigned n, T x)`` for ``n >= 128``
diff --git a/libcxx/docs/Modules.rst b/libcxx/docs/Modules.rst
index 352a198..ebd851f 100644
--- a/libcxx/docs/Modules.rst
+++ b/libcxx/docs/Modules.rst
@@ -24,7 +24,7 @@ Overview
The module sources are stored in ``.cppm`` files. Modules need to be available
as BMIs, which are ``.pcm`` files for Clang. BMIs are not portable, they depend
-on the compiler used and its compilation flags. Therefore there needs to be a
+on the compiler and the compilation flags used. Therefore there needs to be a
way to distribute the ``.cppm`` files to the user and offer a way for them to
build and use the ``.pcm`` files. It is expected this will be done by build
systems in the future. To aid early adaptor and build system vendors libc++
diff --git a/libcxx/docs/UserDocumentation.rst b/libcxx/docs/UserDocumentation.rst
index 4a11a10..dbacb44 100644
--- a/libcxx/docs/UserDocumentation.rst
+++ b/libcxx/docs/UserDocumentation.rst
@@ -7,29 +7,29 @@ User documentation
.. contents::
:local:
-This page contains information about configuration knobs that can be used by
-users when they know libc++ is used by their toolchain, and how to use libc++
-when it is not the default library used by their toolchain. It is aimed at
-users of libc++: a separate page contains documentation aimed at vendors who
-build and ship libc++ as part of their toolchain.
+This page contains information for users of libc++: how to use libc++ if it is not
+the default library used by the toolchain, and what configuration knobs are available
+if libc++ is used by the toolchain. This page is aimed at users of libc++, whereas a
+separate page contains documentation aimed at vendors who build and ship libc++
+as part of their toolchain.
Using a different version of the C++ Standard
=============================================
-Libc++ implements the various versions of the C++ Standard. Changing the version of
+Libc++ implements the various versions of the C++ standard. Changing the version of
the standard can be done by passing ``-std=c++XY`` to the compiler. Libc++ will
-automatically detect what Standard is being used and will provide functionality that
-matches that Standard in the library.
+automatically detect what standard is being used and will provide functionality that
+matches that standard in the library.
.. code-block:: bash
$ clang++ -std=c++17 test.cpp
-Note that using ``-std=c++XY`` with a version of the Standard that has not been ratified
+Note that using ``-std=c++XY`` with a version of the standard that has not been ratified
yet is considered unstable. While we strive to maintain stability, libc++ may be forced to
-make breaking changes to features shipped in a Standard that hasn't been ratified yet. Use
-these versions of the Standard at your own risk.
+make breaking changes to features shipped in a C++ standard that has not been ratified yet.
+Use these versions of the standard at your own risk.
Using libc++ when it is not the system default
@@ -39,16 +39,16 @@ Usually, libc++ is packaged and shipped by a vendor through some delivery vehicl
(operating system distribution, SDK, toolchain, etc) and users don't need to do
anything special in order to use the library.
-On systems where libc++ is provided but is not the default, Clang provides a flag
-called ``-stdlib=`` that can be used to decide which standard library is used.
+However, on systems where libc++ is provided but is not the default, Clang can be invoked
+with the ``-stdlib=`` flag to select which standard library is used.
Using ``-stdlib=libc++`` will select libc++:
.. code-block:: bash
$ clang++ -stdlib=libc++ test.cpp
-On systems where libc++ is the library in use by default such as macOS and FreeBSD,
-this flag is not required.
+This flag is not required on systems where libc++ is the default standard library,
+such as macOS and FreeBSD.
Enabling experimental C++ Library features
@@ -56,9 +56,9 @@ Enabling experimental C++ Library features
Libc++ provides implementations of some experimental features. Experimental features
are either Technical Specifications (TSes) or official features that were voted to
-the Standard but whose implementation is not complete or stable yet in libc++. Those
-are disabled by default because they are neither API nor ABI stable. However, the
-``-fexperimental-library`` compiler flag can be defined to turn those features on.
+the C++ standard but whose implementation is not complete or stable yet in libc++.
+Those are disabled by default because they are neither API nor ABI stable. However,
+users can enable the ``-fexperimental-library`` compiler flag to turn those features on.
On compilers that do not support the ``-fexperimental-library`` flag (such as GCC),
users can define the ``_LIBCPP_ENABLE_EXPERIMENTAL`` macro and manually link against
@@ -75,7 +75,7 @@ when ``-fexperimental-library`` is passed:
.. note::
Experimental libraries are experimental.
* The contents of the ``<experimental/...>`` headers and the associated static
- library will not remain compatible between versions.
+ library may not remain compatible between versions.
* No guarantees of API or ABI stability are provided.
* When the standardized version of an experimental feature is implemented,
the experimental feature is removed two releases after the non-experimental
@@ -94,6 +94,24 @@ enable or disable extended libc++ behavior.
only intended to be used by vendors and changing their value from the one provided
in your toolchain can lead to unexpected behavior.
+**_LIBCPP_DISABLE_DEPRECATION_WARNINGS**:
+ This macro disables warnings when using deprecated components. For example,
+ using `std::auto_ptr` when compiling in C++11 mode will normally trigger a
+ warning saying that `std::auto_ptr` is deprecated. If the macro is defined,
+ no warning will be emitted. By default, this macro is not defined.
+
+**_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS**:
+ This macro is used to disable all visibility annotations inside libc++.
+ Defining this macro and then building libc++ with hidden visibility gives a
+ build of libc++ which does not export any symbols, which can be useful when
+ building statically for inclusion into another library.
+
+**_LIBCPP_ENABLE_EXPERIMENTAL**:
+ This macro enables experimental features. This can be used on compilers that do
+ not support the ``-fexperimental-library`` flag. When used, users also need to
+ ensure that the appropriate experimental library (usually ``libc++experimental.a``)
+ is linked into their program.
+
**_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS**:
This macro is used to enable -Wthread-safety annotations on libc++'s
``std::mutex`` and ``std::lock_guard``. By default, these annotations are
@@ -102,12 +120,6 @@ enable or disable extended libc++ behavior.
**_LIBCPP_HARDENING_MODE**:
This macro is used to choose the :ref:`hardening mode <using-hardening-modes>`.
-**_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS**:
- This macro is used to disable all visibility annotations inside libc++.
- Defining this macro and then building libc++ with hidden visibility gives a
- build of libc++ which does not export any symbols, which can be useful when
- building statically for inclusion into another library.
-
**_LIBCPP_NO_VCRUNTIME**:
Microsoft's C and C++ headers are fairly entangled, and some of their C++
headers are fairly hard to avoid. In particular, `vcruntime_new.h` gets pulled
@@ -150,18 +162,6 @@ enable or disable extended libc++ behavior.
when updating to a newer version of the library, since transitive includes
that your code was previously relying on may have been removed.
-**_LIBCPP_DISABLE_DEPRECATION_WARNINGS**:
- This macro disables warnings when using deprecated components. For example,
- using `std::auto_ptr` when compiling in C++11 mode will normally trigger a
- warning saying that `std::auto_ptr` is deprecated. If the macro is defined,
- no warning will be emitted. By default, this macro is not defined.
-
-**_LIBCPP_ENABLE_EXPERIMENTAL**:
- This macro enables experimental features. This can be used on compilers that do
- not support the ``-fexperimental-library`` flag. When used, users also need to
- ensure that the appropriate experimental library (usually ``libc++experimental.a``)
- is linked into their program.
-
C++17 Specific Configuration Macros
-----------------------------------
**_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR**:
@@ -187,13 +187,6 @@ C++17 Specific Configuration Macros
C++20 Specific Configuration Macros
-----------------------------------
-**_LIBCPP_ENABLE_CXX20_REMOVED_UNCAUGHT_EXCEPTION**:
- This macro is used to re-enable `uncaught_exception`.
-
-**_LIBCPP_ENABLE_CXX20_REMOVED_SHARED_PTR_UNIQUE**:
- This macro is used to re-enable the function
- ``std::shared_ptr<...>::unique()``.
-
**_LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS**:
This macro is used to re-enable the `argument_type`, `result_type`,
`first_argument_type`, and `second_argument_type` members of class
@@ -206,6 +199,10 @@ C++20 Specific Configuration Macros
**_LIBCPP_ENABLE_CXX20_REMOVED_RAW_STORAGE_ITERATOR**:
This macro is used to re-enable `raw_storage_iterator`.
+**_LIBCPP_ENABLE_CXX20_REMOVED_SHARED_PTR_UNIQUE**:
+ This macro is used to re-enable the function
+ ``std::shared_ptr<...>::unique()``.
+
**_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER**:
This macro is used to re-enable `get_temporary_buffer` and `return_temporary_buffer`.
@@ -213,10 +210,15 @@ C++20 Specific Configuration Macros
This macro is used to re-enable `is_literal_type`, `is_literal_type_v`,
`result_of` and `result_of_t`.
+**_LIBCPP_ENABLE_CXX20_REMOVED_UNCAUGHT_EXCEPTION**:
+ This macro is used to re-enable `uncaught_exception`.
C++26 Specific Configuration Macros
-----------------------------------
+**_LIBCPP_ENABLE_CXX26_REMOVED_ALLOCATOR_MEMBERS**:
+ This macro is used to re-enable the redundant member ``allocator<T>::is_always_equal``.
+
**_LIBCPP_ENABLE_CXX26_REMOVED_CODECVT**:
This macro is used to re-enable all named declarations in ``<codecvt>``.
@@ -224,9 +226,6 @@ C++26 Specific Configuration Macros
This macro is used to re-enable the function
``std::basic_string<...>::reserve()``.
-**_LIBCPP_ENABLE_CXX26_REMOVED_ALLOCATOR_MEMBERS**:
- This macro is used to re-enable redundant member of ``allocator<T>::is_always_equal``.
-
**_LIBCPP_ENABLE_CXX26_REMOVED_STRSTREAM**:
This macro is used to re-enable all named declarations in ``<strstream>``.
@@ -237,23 +236,23 @@ C++26 Specific Configuration Macros
Libc++ Extensions
=================
-This section documents various extensions provided by libc++, how they're
-provided, and any information regarding how to use them.
+This section documents various extensions provided by libc++
+and any information regarding how to use them.
Extended integral type support
------------------------------
-Several platforms support types that are not specified in the Standard, such as
-the 128-bit integral types ``__int128_t`` and ``__uint128_t``. As an extension,
-libc++ does a best-effort attempt to support these types like other integral
-types, by supporting them notably in:
+Several platforms support types that are not specified in the C++ standard,
+such as the 128-bit integral types ``__int128_t`` and ``__uint128_t``.
+As an extension, libc++ does a best-effort attempt to support these types like
+other integral types, by supporting them notably in:
* ``<bits>``
* ``<charconv>``
* ``<functional>``
-* ``<type_traits>``
* ``<format>``
* ``<random>``
+* ``<type_traits>``
Additional types supported in random distributions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -272,11 +271,10 @@ The exposition only type ``basic-format-string`` and its typedefs
``format_string``, and ``wformat_string`` in C++23. Libc++ makes these types
available in C++20 as an extension.
-For padding Unicode strings the ``format`` library relies on the Unicode
-Standard. Libc++ retroactively updates the Unicode Standard in older C++
-versions. This allows the library to have better estimates for newly introduced
-Unicode code points, without requiring the user to use the latest C++ version
-in their code base.
+For padding Unicode strings the ``format`` library relies on the Unicode standard.
+Libc++ retroactively updates the Unicode standard in older C++ versions.
+This allows the library to have better estimates for newly introduced Unicode code points,
+without requiring the user to use the latest C++ version in their code base.
In C++26 formatting pointers gained a type ``P`` and allows to use
zero-padding. These options have been retroactively applied to C++20.
@@ -297,8 +295,8 @@ pointer to heap-allocated memory, depending on the length of the string.
As of C++20, the constructors are now declared ``constexpr``, which permits strings to be used
during constant-evaluation time. In libc++, as in other common implementations, it is also possible
to constant-initialize a string object (e.g. via declaring a variable with ``constinit`` or
-``constexpr``), but, only if the string is short enough to not require a heap allocation. Reliance
-upon this should be discouraged in portable code, as the allowed length differs based on the
+``constexpr``), but only if the string is short enough to not require a heap allocation.
+Reliance upon this is discouraged in portable code, as the allowed length differs based on the
standard-library implementation and also based on whether the platform uses 32-bit or 64-bit
pointers.
@@ -317,12 +315,15 @@ Turning off ASan annotation in containers
-----------------------------------------
``__asan_annotate_container_with_allocator`` is a customization point to allow users to disable
-`Address Sanitizer annotations for containers <https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow>`_ for specific allocators. This may be necessary for allocators that access allocated memory.
+`Address Sanitizer annotations for containers <https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow>`_ for specific allocators.
+This may be necessary for allocators that access allocated memory.
This customization point exists only when ``_LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS`` Feature Test Macro is defined.
-For allocators not running destructors, it is also possible to `bulk-unpoison memory <https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning>`_ instead of disabling annotations altogether.
+For allocators not running destructors, it is also possible to `bulk-unpoison memory <https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning>`_
+instead of disabling annotations altogether.
-The struct may be specialized for user-defined allocators. It is a `Cpp17UnaryTypeTrait <http://eel.is/c++draft/type.traits#meta.rqmts>`_ with a base characteristic of ``true_type`` if the container is allowed to use annotations and ``false_type`` otherwise.
+The struct may be specialized for user-defined allocators. It is a `Cpp17UnaryTypeTrait <http://eel.is/c++draft/type.traits#meta.rqmts>`_
+with a base characteristic of ``true_type`` if the container is allowed to use annotations and ``false_type`` otherwise.
The annotations for a ``user_allocator`` can be disabled like this:
@@ -371,7 +372,7 @@ locale behave differently than they otherwise do. By default, wide character
streams don't convert wide characters but input/output them as is. If a
specific locale is imbued, the IO with the underlying stream happens with
regular ``char`` elements, which are converted to/from wide characters
-according to the locale. Note that this doesn't behave as expected if the
+according to the locale. Note that this will not behave as expected if the
stream has been set in Unicode mode.
diff --git a/libcxx/include/__format/format_arg_store.h b/libcxx/include/__format/format_arg_store.h
index dba2dfd..87557aa 100644
--- a/libcxx/include/__format/format_arg_store.h
+++ b/libcxx/include/__format/format_arg_store.h
@@ -17,6 +17,7 @@
#include <__concepts/arithmetic.h>
#include <__concepts/same_as.h>
#include <__config>
+#include <__cstddef/size_t.h>
#include <__format/concepts.h>
#include <__format/format_arg.h>
#include <__type_traits/conditional.h>
@@ -32,6 +33,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace __format {
+template <class _Arr, class _Elem>
+inline constexpr bool __is_bounded_array_of = false;
+
+template <class _Elem, size_t _Len>
+inline constexpr bool __is_bounded_array_of<_Elem[_Len], _Elem> = true;
+
/// \returns The @c __arg_t based on the type of the formatting argument.
///
/// \pre \c __formattable<_Tp, typename _Context::char_type>
@@ -110,7 +117,7 @@ consteval __arg_t __determine_arg_t() {
// Char array
template <class _Context, class _Tp>
- requires(is_array_v<_Tp> && same_as<_Tp, typename _Context::char_type[extent_v<_Tp>]>)
+ requires __is_bounded_array_of<_Tp, typename _Context::char_type>
consteval __arg_t __determine_arg_t() {
return __arg_t::__string_view;
}
@@ -168,13 +175,14 @@ _LIBCPP_HIDE_FROM_ABI basic_format_arg<_Context> __create_format_arg(_Tp& __valu
static_assert(__arg != __arg_t::__none, "the supplied type is not formattable");
static_assert(__formattable_with<_Tp, _Context>);
+ using __context_char_type = _Context::char_type;
// Not all types can be used to directly initialize the
// __basic_format_arg_value. First handle all types needing adjustment, the
// final else requires no adjustment.
if constexpr (__arg == __arg_t::__char_type)
# if _LIBCPP_HAS_WIDE_CHARACTERS
- if constexpr (same_as<typename _Context::char_type, wchar_t> && same_as<_Dp, char>)
+ if constexpr (same_as<__context_char_type, wchar_t> && same_as<_Dp, char>)
return basic_format_arg<_Context>{__arg, static_cast<wchar_t>(static_cast<unsigned char>(__value))};
else
# endif
@@ -189,14 +197,16 @@ _LIBCPP_HIDE_FROM_ABI basic_format_arg<_Context> __create_format_arg(_Tp& __valu
return basic_format_arg<_Context>{__arg, static_cast<unsigned long long>(__value)};
else if constexpr (__arg == __arg_t::__string_view)
// Using std::size on a character array will add the NUL-terminator to the size.
- if constexpr (is_array_v<_Dp>)
- return basic_format_arg<_Context>{
- __arg, basic_string_view<typename _Context::char_type>{__value, extent_v<_Dp> - 1}};
- else
- // When the _Traits or _Allocator are different an implicit conversion will
- // fail.
+ if constexpr (__is_bounded_array_of<_Dp, __context_char_type>) {
+ const __context_char_type* const __pbegin = std::begin(__value);
+ const __context_char_type* const __pzero =
+ char_traits<__context_char_type>::find(__pbegin, extent_v<_Dp>, __context_char_type{});
+ _LIBCPP_ASSERT_VALID_INPUT_RANGE(__pzero != nullptr, "formatting a non-null-terminated array");
return basic_format_arg<_Context>{
- __arg, basic_string_view<typename _Context::char_type>{__value.data(), __value.size()}};
+ __arg, basic_string_view<__context_char_type>{__pbegin, static_cast<size_t>(__pzero - __pbegin)}};
+ } else
+ // When the _Traits or _Allocator are different an implicit conversion will fail.
+ return basic_format_arg<_Context>{__arg, basic_string_view<__context_char_type>{__value.data(), __value.size()}};
else if constexpr (__arg == __arg_t::__ptr)
return basic_format_arg<_Context>{__arg, static_cast<const void*>(__value)};
else if constexpr (__arg == __arg_t::__handle)
diff --git a/libcxx/include/__format/formatter_string.h b/libcxx/include/__format/formatter_string.h
index d71d19a..bad6a4d 100644
--- a/libcxx/include/__format/formatter_string.h
+++ b/libcxx/include/__format/formatter_string.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP___FORMAT_FORMATTER_STRING_H
#define _LIBCPP___FORMAT_FORMATTER_STRING_H
+#include <__assert>
#include <__config>
#include <__format/concepts.h>
#include <__format/format_parse_context.h>
@@ -17,6 +18,7 @@
#include <__format/formatter_output.h>
#include <__format/parser_std_format_spec.h>
#include <__format/write_escaped.h>
+#include <cstddef>
#include <string>
#include <string_view>
@@ -94,7 +96,9 @@ struct formatter<_CharT[_Size], _CharT> : public __formatter_string<_CharT> {
template <class _FormatContext>
_LIBCPP_HIDE_FROM_ABI typename _FormatContext::iterator
format(const _CharT (&__str)[_Size], _FormatContext& __ctx) const {
- return _Base::format(basic_string_view<_CharT>(__str, _Size), __ctx);
+ const _CharT* const __pzero = char_traits<_CharT>::find(__str, _Size, _CharT{});
+ _LIBCPP_ASSERT_VALID_INPUT_RANGE(__pzero != nullptr, "formatting a non-null-terminated array");
+ return _Base::format(basic_string_view<_CharT>(__str, static_cast<size_t>(__pzero - __str)), __ctx);
}
};
diff --git a/libcxx/include/__utility/pair.h b/libcxx/include/__utility/pair.h
index 99b0eb9..ab390aa 100644
--- a/libcxx/include/__utility/pair.h
+++ b/libcxx/include/__utility/pair.h
@@ -654,42 +654,42 @@ get(const pair<_T1, _T2>&& __p) _NOEXCEPT {
#if _LIBCPP_STD_VER >= 14
template <class _T1, class _T2>
inline _LIBCPP_HIDE_FROM_ABI constexpr _T1& get(pair<_T1, _T2>& __p) _NOEXCEPT {
- return __get_pair<0>::get(__p);
+ return __p.first;
}
template <class _T1, class _T2>
inline _LIBCPP_HIDE_FROM_ABI constexpr _T1 const& get(pair<_T1, _T2> const& __p) _NOEXCEPT {
- return __get_pair<0>::get(__p);
+ return __p.first;
}
template <class _T1, class _T2>
inline _LIBCPP_HIDE_FROM_ABI constexpr _T1&& get(pair<_T1, _T2>&& __p) _NOEXCEPT {
- return __get_pair<0>::get(std::move(__p));
+ return std::forward<_T1&&>(__p.first);
}
template <class _T1, class _T2>
inline _LIBCPP_HIDE_FROM_ABI constexpr _T1 const&& get(pair<_T1, _T2> const&& __p) _NOEXCEPT {
- return __get_pair<0>::get(std::move(__p));
+ return std::forward<_T1 const&&>(__p.first);
}
-template <class _T1, class _T2>
-inline _LIBCPP_HIDE_FROM_ABI constexpr _T1& get(pair<_T2, _T1>& __p) _NOEXCEPT {
- return __get_pair<1>::get(__p);
+template <class _T2, class _T1>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _T2& get(pair<_T1, _T2>& __p) _NOEXCEPT {
+ return __p.second;
}
-template <class _T1, class _T2>
-inline _LIBCPP_HIDE_FROM_ABI constexpr _T1 const& get(pair<_T2, _T1> const& __p) _NOEXCEPT {
- return __get_pair<1>::get(__p);
+template <class _T2, class _T1>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _T2 const& get(pair<_T1, _T2> const& __p) _NOEXCEPT {
+ return __p.second;
}
-template <class _T1, class _T2>
-inline _LIBCPP_HIDE_FROM_ABI constexpr _T1&& get(pair<_T2, _T1>&& __p) _NOEXCEPT {
- return __get_pair<1>::get(std::move(__p));
+template <class _T2, class _T1>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _T2&& get(pair<_T1, _T2>&& __p) _NOEXCEPT {
+ return std::forward<_T2&&>(__p.second);
}
-template <class _T1, class _T2>
-inline _LIBCPP_HIDE_FROM_ABI constexpr _T1 const&& get(pair<_T2, _T1> const&& __p) _NOEXCEPT {
- return __get_pair<1>::get(std::move(__p));
+template <class _T2, class _T1>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _T2 const&& get(pair<_T1, _T2> const&& __p) _NOEXCEPT {
+ return std::forward<_T2 const&&>(__p.second);
}
#endif // _LIBCPP_STD_VER >= 14
diff --git a/libcxx/include/locale b/libcxx/include/locale
index fa2620d..b206bf8 100644
--- a/libcxx/include/locale
+++ b/libcxx/include/locale
@@ -1277,7 +1277,7 @@ _LIBCPP_HIDE_FROM_ABI inline _OutputIterator num_put<_CharT, _OutputIterator>::_
}
auto __res = std::__to_chars_integral(__buffer_ptr, __char_buffer + __buffer_size, __uval, __base);
- _LIBCPP_ASSERT_INTERNAL(__res.__ec == std::errc(), "to_chars: invalid maximum buffer size computed?");
+ _LIBCPP_ASSERT_INTERNAL(__res.__ec == std::errc(0), "to_chars: invalid maximum buffer size computed?");
// Make letters uppercase
if (__flags & ios_base::hex && __flags & ios_base::uppercase) {
diff --git a/libcxx/src/call_once.cpp b/libcxx/src/call_once.cpp
index a398eae..237969a 100644
--- a/libcxx/src/call_once.cpp
+++ b/libcxx/src/call_once.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include <__config>
#include <__mutex/once_flag.h>
#include <__utility/exception_guard.h>
diff --git a/libcxx/src/condition_variable.cpp b/libcxx/src/condition_variable.cpp
index b374760..a87399d 100644
--- a/libcxx/src/condition_variable.cpp
+++ b/libcxx/src/condition_variable.cpp
@@ -7,7 +7,13 @@
//===----------------------------------------------------------------------===//
#include <condition_variable>
+#include <limits>
+#include <ratio>
#include <thread>
+#include <__chrono/duration.h>
+#include <__chrono/system_clock.h>
+#include <__chrono/time_point.h>
+#include <__system_error/throw_system_error.h>
#if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
# pragma comment(lib, "pthread")
diff --git a/libcxx/src/filesystem/directory_iterator.cpp b/libcxx/src/filesystem/directory_iterator.cpp
index 7e8e40d..7d00c49 100644
--- a/libcxx/src/filesystem/directory_iterator.cpp
+++ b/libcxx/src/filesystem/directory_iterator.cpp
@@ -8,6 +8,7 @@
#include <__assert>
#include <__config>
+#include <__memory/shared_ptr.h>
#include <errno.h>
#include <filesystem>
#include <stack>
diff --git a/libcxx/src/filesystem/error.h b/libcxx/src/filesystem/error.h
index 7d81d4b..52a18b2 100644
--- a/libcxx/src/filesystem/error.h
+++ b/libcxx/src/filesystem/error.h
@@ -10,6 +10,7 @@
#define FILESYSTEM_ERROR_H
#include <__assert>
+#include <__chrono/time_point.h>
#include <__config>
#include <cerrno>
#include <cstdarg>
diff --git a/libcxx/src/filesystem/filesystem_clock.cpp b/libcxx/src/filesystem/filesystem_clock.cpp
index bec082f..49f65ef 100644
--- a/libcxx/src/filesystem/filesystem_clock.cpp
+++ b/libcxx/src/filesystem/filesystem_clock.cpp
@@ -8,8 +8,10 @@
#include <__config>
#include <__system_error/throw_system_error.h>
+#include <cerrno>
#include <chrono>
#include <filesystem>
+#include <ratio>
#include <time.h>
#if defined(_LIBCPP_WIN32API)
diff --git a/libcxx/src/filesystem/filesystem_error.cpp b/libcxx/src/filesystem/filesystem_error.cpp
index 456b902..0d8185fb 100644
--- a/libcxx/src/filesystem/filesystem_error.cpp
+++ b/libcxx/src/filesystem/filesystem_error.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include <__config>
+#include <__memory/shared_ptr.h>
#include <__utility/unreachable.h>
#include <filesystem>
#include <system_error>
diff --git a/libcxx/src/filesystem/operations.cpp b/libcxx/src/filesystem/operations.cpp
index 23c1c28..b71f94a 100644
--- a/libcxx/src/filesystem/operations.cpp
+++ b/libcxx/src/filesystem/operations.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include <__algorithm/copy.h>
#include <__assert>
#include <__config>
#include <__utility/unreachable.h>
diff --git a/libcxx/src/include/ryu/common.h b/libcxx/src/include/ryu/common.h
index d5168d8..591ec41 100644
--- a/libcxx/src/include/ryu/common.h
+++ b/libcxx/src/include/ryu/common.h
@@ -44,6 +44,7 @@
#include <__assert>
#include <__config>
+#include <cstdint>
#include <cstring>
_LIBCPP_BEGIN_NAMESPACE_STD
diff --git a/libcxx/src/memory.cpp b/libcxx/src/memory.cpp
index 16190c2..9be40cb 100644
--- a/libcxx/src/memory.cpp
+++ b/libcxx/src/memory.cpp
@@ -11,7 +11,9 @@
# define _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS
#endif
+#include <__functional/hash.h>
#include <memory>
+#include <typeinfo>
#if _LIBCPP_HAS_THREADS
# include <mutex>
diff --git a/libcxx/src/mutex.cpp b/libcxx/src/mutex.cpp
index b2193e2..5b1e7da 100644
--- a/libcxx/src/mutex.cpp
+++ b/libcxx/src/mutex.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include <__assert>
+#include <__system_error/throw_system_error.h>
#include <__thread/id.h>
#include <__utility/exception_guard.h>
#include <limits>
diff --git a/libcxx/src/random.cpp b/libcxx/src/random.cpp
index ff53a3d..5c66448 100644
--- a/libcxx/src/random.cpp
+++ b/libcxx/src/random.cpp
@@ -16,6 +16,7 @@
#include <__system_error/throw_system_error.h>
#include <limits>
#include <random>
+#include <string>
#include <errno.h>
#include <stdio.h>
diff --git a/libcxx/src/ryu/d2fixed.cpp b/libcxx/src/ryu/d2fixed.cpp
index 4cfc395..abfa340 100644
--- a/libcxx/src/ryu/d2fixed.cpp
+++ b/libcxx/src/ryu/d2fixed.cpp
@@ -42,6 +42,7 @@
#include <__assert>
#include <__config>
#include <charconv>
+#include <cstddef>
#include <cstring>
#include "include/ryu/common.h"
diff --git a/libcxx/src/ryu/d2s.cpp b/libcxx/src/ryu/d2s.cpp
index 5b80ed8..c0d1110 100644
--- a/libcxx/src/ryu/d2s.cpp
+++ b/libcxx/src/ryu/d2s.cpp
@@ -42,6 +42,7 @@
#include <__assert>
#include <__config>
#include <charconv>
+#include <cstddef>
#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
diff --git a/libcxx/src/ryu/f2s.cpp b/libcxx/src/ryu/f2s.cpp
index f42fbd6..f1f8cdf 100644
--- a/libcxx/src/ryu/f2s.cpp
+++ b/libcxx/src/ryu/f2s.cpp
@@ -42,6 +42,8 @@
#include <__assert>
#include <__config>
#include <charconv>
+#include <cstdint>
+#include <cstddef>
#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
diff --git a/libcxx/src/thread.cpp b/libcxx/src/thread.cpp
index db40d9d..028d36e 100644
--- a/libcxx/src/thread.cpp
+++ b/libcxx/src/thread.cpp
@@ -6,8 +6,10 @@
//
//===----------------------------------------------------------------------===//
+#include <__system_error/throw_system_error.h>
#include <__thread/poll_with_backoff.h>
#include <__thread/timed_backoff_policy.h>
+#include <__utility/pair.h>
#include <exception>
#include <future>
#include <limits>
diff --git a/libcxx/test/std/containers/associative/multimap/scary.pass.cpp b/libcxx/test/libcxx/containers/associative/map/scary.compile.pass.cpp
index 2390d8f..89e753f 100644
--- a/libcxx/test/std/containers/associative/multimap/scary.pass.cpp
+++ b/libcxx/test/libcxx/containers/associative/map/scary.compile.pass.cpp
@@ -8,20 +8,19 @@
// <map>
-// class map class multimap
+// class map
+// class multimap
-// Extension: SCARY/N2913 iterator compatibility between map and multimap
+// Extension: SCARY/N2913 iterator compatibility between map and multimap
#include <map>
#include "test_macros.h"
-int main(int, char**) {
+void test() {
typedef std::map<int, int> M1;
typedef std::multimap<int, int> M2;
- M2::iterator i;
- M1::iterator j = i;
- ((void)j);
- return 0;
+ ASSERT_SAME_TYPE(M1::iterator, M2::iterator);
+ ASSERT_SAME_TYPE(M1::const_iterator, M2::const_iterator);
}
diff --git a/libcxx/test/std/containers/associative/multiset/scary.pass.cpp b/libcxx/test/libcxx/containers/associative/set/scary.compile.pass.cpp
index 4d30c27..87ed05d 100644
--- a/libcxx/test/std/containers/associative/multiset/scary.pass.cpp
+++ b/libcxx/test/libcxx/containers/associative/set/scary.compile.pass.cpp
@@ -8,20 +8,19 @@
// <set>
-// class set class multiset
+// class set
+// class multiset
-// Extension: SCARY/N2913 iterator compatibility between set and multiset
+// Extension: SCARY/N2913 iterator compatibility between set and multiset
#include <set>
#include "test_macros.h"
-int main(int, char**) {
+void test() {
typedef std::set<int> M1;
typedef std::multiset<int> M2;
- M2::iterator i;
- M1::iterator j = i;
- ((void)j);
- return 0;
+ ASSERT_SAME_TYPE(M1::iterator, M2::iterator);
+ ASSERT_SAME_TYPE(M1::const_iterator, M2::const_iterator);
}
diff --git a/libcxx/test/std/containers/unord/unord.multimap/scary.pass.cpp b/libcxx/test/libcxx/containers/associative/unord.map/scary.compile.pass.cpp
index 59ade49..db2ef33 100644
--- a/libcxx/test/std/containers/unord/unord.multimap/scary.pass.cpp
+++ b/libcxx/test/libcxx/containers/associative/unord.map/scary.compile.pass.cpp
@@ -8,20 +8,21 @@
// <unordered_map>
-// class unordered_map class unordered_multimap
+// class unordered_map
+// class unordered_multimap
-// Extension: SCARY/N2913 iterator compatibility between unordered_map and unordered_multimap
+// Extension: SCARY/N2913 iterator compatibility between unordered_map and unordered_multimap
#include <unordered_map>
#include "test_macros.h"
-int main(int, char**) {
+void test() {
typedef std::unordered_map<int, int> M1;
typedef std::unordered_multimap<int, int> M2;
- M2::iterator i;
- M1::iterator j = i;
- ((void)j);
- return 0;
+ ASSERT_SAME_TYPE(M1::iterator, M2::iterator);
+ ASSERT_SAME_TYPE(M1::const_iterator, M2::const_iterator);
+ ASSERT_SAME_TYPE(M1::local_iterator, M2::local_iterator);
+ ASSERT_SAME_TYPE(M1::const_local_iterator, M2::const_local_iterator);
}
diff --git a/libcxx/test/std/containers/unord/unord.multiset/scary.pass.cpp b/libcxx/test/libcxx/containers/associative/unord.set/scary.compile.pass.cpp
index 89f575b..cd33e1a 100644
--- a/libcxx/test/std/containers/unord/unord.multiset/scary.pass.cpp
+++ b/libcxx/test/libcxx/containers/associative/unord.set/scary.compile.pass.cpp
@@ -8,20 +8,21 @@
// <unordered_set>
-// class unordered_set class unordered_multiset
+// class unordered_set
+// class unordered_multiset
-// Extension: SCARY/N2913 iterator compatibility between unordered_set and unordered_multiset
+// Extension: SCARY/N2913 iterator compatibility between unordered_set and unordered_multiset
#include <unordered_set>
#include "test_macros.h"
-int main(int, char**) {
+void test() {
typedef std::unordered_set<int> M1;
typedef std::unordered_multiset<int> M2;
- M2::iterator i;
- M1::iterator j = i;
- ((void)j);
- return 0;
+ ASSERT_SAME_TYPE(M1::iterator, M2::iterator);
+ ASSERT_SAME_TYPE(M1::const_iterator, M2::const_iterator);
+ ASSERT_SAME_TYPE(M1::local_iterator, M2::local_iterator);
+ ASSERT_SAME_TYPE(M1::const_local_iterator, M2::const_local_iterator);
}
diff --git a/libcxx/test/libcxx/containers/container.adaptors/flat.map/scary.compile.pass.cpp b/libcxx/test/libcxx/containers/container.adaptors/flat.map/scary.compile.pass.cpp
new file mode 100644
index 0000000..3fff89c
--- /dev/null
+++ b/libcxx/test/libcxx/containers/container.adaptors/flat.map/scary.compile.pass.cpp
@@ -0,0 +1,33 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
+
+// <flat_map>
+
+// class flat_map
+// class flat_multimap
+
+// Extension: SCARY/N2913 iterator compatibility between flat_map and flat_multimap
+// Test for the absence of this feature
+
+#include <flat_map>
+#include <type_traits>
+
+#include "test_macros.h"
+
+void test() {
+ typedef std::flat_map<int, int> M1;
+ typedef std::flat_multimap<int, int> M2;
+
+ static_assert(!std::is_convertible_v<M1::iterator, M2::iterator>);
+ static_assert(!std::is_convertible_v<M2::iterator, M1::iterator>);
+
+ static_assert(!std::is_convertible_v<M1::const_iterator, M2::const_iterator>);
+ static_assert(!std::is_convertible_v<M2::const_iterator, M1::const_iterator>);
+}
diff --git a/libcxx/test/libcxx/utilities/format/format.arguments/format.arg/assert.array.pass.cpp b/libcxx/test/libcxx/utilities/format/format.arguments/format.arg/assert.array.pass.cpp
new file mode 100644
index 0000000..1e9b1d9
--- /dev/null
+++ b/libcxx/test/libcxx/utilities/format/format.arguments/format.arg/assert.array.pass.cpp
@@ -0,0 +1,33 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <format>
+
+// Formatting non-null-terminated character arrays.
+
+// REQUIRES: std-at-least-c++20, has-unix-headers, libcpp-hardening-mode={{extensive|debug}}
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+
+#include <format>
+
+#include "check_assertion.h"
+
+int main(int, char**) {
+ {
+ const char non_null_terminated[3]{'1', '2', '3'};
+ TEST_LIBCPP_ASSERT_FAILURE(std::format("{}", non_null_terminated), "formatting a non-null-terminated array");
+ }
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ {
+ const wchar_t non_null_terminated[3]{L'1', L'2', L'3'};
+ TEST_LIBCPP_ASSERT_FAILURE(std::format(L"{}", non_null_terminated), "formatting a non-null-terminated array");
+ }
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
index 1b3ff52..bc056db 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/formatter.char_array.pass.cpp
@@ -41,13 +41,10 @@ struct Tester {
constexpr Tester(const char (&r)[N]) { __builtin_memcpy(text, r, N); }
char text[N];
- // The size of the array shouldn't include the NUL character.
- static const std::size_t size = N - 1;
-
template <class CharT>
void
test(const std::basic_string<CharT>& expected, const std::basic_string_view<CharT>& fmt, std::size_t offset) const {
- using Str = CharT[size];
+ using Str = CharT[N];
std::basic_format_parse_context<CharT> parse_ctx{fmt};
std::formatter<Str, CharT> formatter;
static_assert(std::semiregular<decltype(formatter)>);
@@ -57,16 +54,25 @@ struct Tester {
assert(std::to_address(it) == std::to_address(fmt.end()) - offset);
std::basic_string<CharT> result;
- auto out = std::back_inserter(result);
+ auto out = std::back_inserter(result);
using FormatCtxT = std::basic_format_context<decltype(out), CharT>;
- std::basic_string<CharT> buffer{text, text + N};
- // Note not too found of this hack
- Str* data = reinterpret_cast<Str*>(const_cast<CharT*>(buffer.c_str()));
-
- FormatCtxT format_ctx =
- test_format_context_create<decltype(out), CharT>(out, std::make_format_args<FormatCtxT>(*data));
- formatter.format(*data, format_ctx);
+ if constexpr (std::is_same_v<CharT, char>) {
+ FormatCtxT format_ctx =
+ test_format_context_create<decltype(out), CharT>(out, std::make_format_args<FormatCtxT>(text));
+ formatter.format(text, format_ctx);
+ }
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ else {
+ Str buffer;
+ for (std::size_t i = 0; i != N; ++i) {
+ buffer[i] = static_cast<CharT>(text[i]);
+ }
+ FormatCtxT format_ctx =
+ test_format_context_create<decltype(out), CharT>(out, std::make_format_args<FormatCtxT>(buffer));
+ formatter.format(buffer, format_ctx);
+ }
+#endif
assert(result == expected);
}
@@ -118,8 +124,8 @@ template <class CharT>
void test_array() {
test_helper_wrapper<" azAZ09,./<>?">(STR(" azAZ09,./<>?"), STR("}"));
- std::basic_string<CharT> s(CSTR("abc\0abc"), 7);
- test_helper_wrapper<"abc\0abc">(s, STR("}"));
+ // Contents after embedded null terminator are not formatted.
+ test_helper_wrapper<"abc\0abc">(STR("abc"), STR("}"));
test_helper_wrapper<"world">(STR("world"), STR("}"));
test_helper_wrapper<"world">(STR("world"), STR("_>}"));
diff --git a/libcxx/test/std/utilities/format/format.functions/format_tests.h b/libcxx/test/std/utilities/format/format.functions/format_tests.h
index 3969b34..60abd4a 100644
--- a/libcxx/test/std/utilities/format/format.functions/format_tests.h
+++ b/libcxx/test/std/utilities/format/format.functions/format_tests.h
@@ -3190,6 +3190,15 @@ void format_tests(TestFunction check, ExceptionTest check_exception) {
check(SV("hello 09azAZ!"), SV("hello {}"), data);
}
{
+ // https://github.com/llvm/llvm-project/issues/115935
+ // Contents after the embedded null character are discarded.
+ CharT buffer[] = {CharT('a'), CharT('b'), CharT('c'), 0, CharT('d'), CharT('e'), CharT('f'), 0};
+ check(SV("hello abc"), SV("hello {}"), buffer);
+ // Even when the last element of the array is not null character.
+ CharT buffer2[] = {CharT('a'), CharT('b'), CharT('c'), 0, CharT('d'), CharT('e'), CharT('f')};
+ check(SV("hello abc"), SV("hello {}"), buffer2);
+ }
+ {
std::basic_string<CharT> data = STR("world");
check(SV("hello world"), SV("hello {}"), data);
}
diff --git a/libcxx/test/std/utilities/utility/pairs/pair.astuple/pairs.by.type.pass.cpp b/libcxx/test/std/utilities/utility/pairs/pair.astuple/pairs.by.type.pass.cpp
index f6940c4..6d1dc9f 100644
--- a/libcxx/test/std/utilities/utility/pairs/pair.astuple/pairs.by.type.pass.cpp
+++ b/libcxx/test/std/utilities/utility/pairs/pair.astuple/pairs.by.type.pass.cpp
@@ -8,81 +8,126 @@
// UNSUPPORTED: c++03, c++11
-#include <utility>
-#include <string>
-#include <type_traits>
+#include <cassert>
#include <complex>
#include <memory>
-
-#include <cassert>
+#include <type_traits>
+#include <utility>
#include "test_macros.h"
-int main(int, char**)
-{
- typedef std::complex<float> cf;
- {
- auto t1 = std::make_pair<int, cf> ( 42, { 1,2 } );
- assert ( std::get<int>(t1) == 42 );
- assert ( std::get<cf>(t1).real() == 1 );
- assert ( std::get<cf>(t1).imag() == 2 );
- }
+TEST_CONSTEXPR_CXX14 bool test() {
+ { // Make sure that references work as expected
+ int i = 1;
+ int j = 2;
{
- const std::pair<int, const int> p1 { 1, 2 };
- const int &i1 = std::get<int>(p1);
- const int &i2 = std::get<const int>(p1);
- assert ( i1 == 1 );
- assert ( i2 == 2 );
- }
+ std::pair<int&, int&&> p(i, std::move(j));
+ assert(&std::get<int&>(p) == &i);
+ assert(&std::get<int&&>(p) == &j);
- {
- typedef std::unique_ptr<int> upint;
- std::pair<upint, int> t(upint(new int(4)), 42);
- upint p = std::get<upint>(std::move(t)); // get rvalue
- assert(*p == 4);
- assert(std::get<upint>(t) == nullptr); // has been moved from
+ assert(&std::get<int&>(std::move(p)) == &i);
+ assert(std::get<int&&>(std::move(p)) == 2);
+
+ const std::pair<int&, int&&> cp(i, std::move(j));
+ assert(&std::get<int&>(cp) == &i);
+ assert(&std::get<int&&>(cp) == &j);
+
+ assert(&std::get<int&>(std::move(cp)) == &i);
+ assert(std::get<int&&>(std::move(cp)) == 2);
}
{
- typedef std::unique_ptr<int> upint;
- const std::pair<upint, int> t(upint(new int(4)), 42);
- static_assert(std::is_same<const upint&&, decltype(std::get<upint>(std::move(t)))>::value, "");
- static_assert(noexcept(std::get<upint>(std::move(t))), "");
- static_assert(std::is_same<const int&&, decltype(std::get<int>(std::move(t)))>::value, "");
- static_assert(noexcept(std::get<int>(std::move(t))), "");
- auto&& p = std::get<upint>(std::move(t)); // get const rvalue
- auto&& i = std::get<int>(std::move(t)); // get const rvalue
- assert(*p == 4);
- assert(i == 42);
- assert(std::get<upint>(t) != nullptr);
+ std::pair<int&&, int&> p(std::move(i), j);
+ assert(&std::get<int&>(p) == &j);
+ assert(&std::get<int&&>(p) == &i);
+
+ assert(&std::get<int&>(std::move(p)) == &j);
+ assert(std::get<int&&>(std::move(p)) == 1);
+
+ const std::pair<int&&, int&> cp(std::move(i), j);
+ assert(&std::get<int&>(cp) == &j);
+ assert(&std::get<int&&>(cp) == &i);
+
+ assert(&std::get<int&>(std::move(cp)) == &j);
+ assert(std::get<int&&>(std::move(cp)) == 1);
}
+ }
- {
- int x = 42;
+ {
+ typedef std::complex<float> cf;
+ auto t1 = std::make_pair<int, cf>(42, {1, 2});
+ assert(std::get<int>(t1) == 42);
+ assert(std::get<cf>(t1).real() == 1);
+ assert(std::get<cf>(t1).imag() == 2);
+ }
+
+ {
+ const std::pair<int, const int> p1{1, 2};
+ const int& i1 = std::get<int>(p1);
+ const int& i2 = std::get<const int>(p1);
+ assert(i1 == 1);
+ assert(i2 == 2);
+ }
+
+ {
+ int x = 42;
int const y = 43;
std::pair<int&, int const&> const p(x, y);
static_assert(std::is_same<int&, decltype(std::get<int&>(std::move(p)))>::value, "");
static_assert(noexcept(std::get<int&>(std::move(p))), "");
static_assert(std::is_same<int const&, decltype(std::get<int const&>(std::move(p)))>::value, "");
static_assert(noexcept(std::get<int const&>(std::move(p))), "");
- }
+ }
- {
- int x = 42;
+ {
+ int x = 42;
int const y = 43;
std::pair<int&&, int const&&> const p(std::move(x), std::move(y));
static_assert(std::is_same<int&&, decltype(std::get<int&&>(std::move(p)))>::value, "");
static_assert(noexcept(std::get<int&&>(std::move(p))), "");
static_assert(std::is_same<int const&&, decltype(std::get<int const&&>(std::move(p)))>::value, "");
static_assert(noexcept(std::get<int const&&>(std::move(p))), "");
- }
+ }
- {
- constexpr const std::pair<int, const int> p { 1, 2 };
+ {
+ constexpr const std::pair<int, const int> p{1, 2};
static_assert(std::get<int>(std::move(p)) == 1, "");
static_assert(std::get<const int>(std::move(p)) == 2, "");
- }
+ }
+
+ return true;
+}
+
+int main(int, char**) {
+ test();
+#if TEST_STD_VER >= 14
+ static_assert(test(), "");
+#endif
+
+ // These tests use types which only work during constant evaluation in very recent standards
+
+ {
+ typedef std::unique_ptr<int> upint;
+ std::pair<upint, int> t(upint(new int(4)), 42);
+ upint p = std::get<upint>(std::move(t)); // get rvalue
+ assert(*p == 4);
+ assert(std::get<upint>(t) == nullptr); // has been moved from
+ }
+
+ {
+ typedef std::unique_ptr<int> upint;
+ const std::pair<upint, int> t(upint(new int(4)), 42);
+ static_assert(std::is_same<const upint&&, decltype(std::get<upint>(std::move(t)))>::value, "");
+ static_assert(noexcept(std::get<upint>(std::move(t))), "");
+ static_assert(std::is_same<const int&&, decltype(std::get<int>(std::move(t)))>::value, "");
+ static_assert(noexcept(std::get<int>(std::move(t))), "");
+ auto&& p = std::get<upint>(std::move(t)); // get const rvalue
+ auto&& i = std::get<int>(std::move(t)); // get const rvalue
+ assert(*p == 4);
+ assert(i == 42);
+ assert(std::get<upint>(t) != nullptr);
+ }
return 0;
}
diff --git a/libcxxabi/CMakeLists.txt b/libcxxabi/CMakeLists.txt
index 6dcfc51..3e2f80b 100644
--- a/libcxxabi/CMakeLists.txt
+++ b/libcxxabi/CMakeLists.txt
@@ -187,6 +187,7 @@ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)
if(LIBCXXABI_LIBDIR_SUBDIR)
string(APPEND LIBCXXABI_TARGET_SUBDIR /${LIBCXXABI_LIBDIR_SUBDIR})
endif()
+ cmake_path(NORMAL_PATH LIBCXXABI_TARGET_SUBDIR)
set(LIBCXXABI_HEADER_DIR ${LLVM_BINARY_DIR})
set(LIBCXXABI_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LIBCXXABI_TARGET_SUBDIR})
set(LIBCXXABI_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX}/${LIBCXXABI_TARGET_SUBDIR} CACHE STRING
diff --git a/libcxxabi/src/demangle/ItaniumDemangle.h b/libcxxabi/src/demangle/ItaniumDemangle.h
index 5baafd2..6acefee 100644
--- a/libcxxabi/src/demangle/ItaniumDemangle.h
+++ b/libcxxabi/src/demangle/ItaniumDemangle.h
@@ -3421,7 +3421,7 @@ const typename AbstractManglingParser<
{"or", OperatorInfo::Binary, false, Node::Prec::Ior, "operator|"},
{"pL", OperatorInfo::Binary, false, Node::Prec::Assign, "operator+="},
{"pl", OperatorInfo::Binary, false, Node::Prec::Additive, "operator+"},
- {"pm", OperatorInfo::Member, /*Named*/ false, Node::Prec::PtrMem,
+ {"pm", OperatorInfo::Member, /*Named*/ true, Node::Prec::PtrMem,
"operator->*"},
{"pp", OperatorInfo::Postfix, false, Node::Prec::Postfix, "operator++"},
{"ps", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator+"},
diff --git a/libcxxabi/test/test_demangle.pass.cpp b/libcxxabi/test/test_demangle.pass.cpp
index 343885d..8e85f53 100644
--- a/libcxxabi/test/test_demangle.pass.cpp
+++ b/libcxxabi/test/test_demangle.pass.cpp
@@ -30248,6 +30248,8 @@ const char* cases[][2] = {
{"_Z3fooPU9__ptrauthILj3ELb1ELj234EEPi", "foo(int* __ptrauth<3u, true, 234u>*)"},
{"_Z3fooIPU9__ptrauthILj1ELb0ELj64EEPiEvT_", "void foo<int* __ptrauth<1u, false, 64u>*>(int* __ptrauth<1u, false, 64u>*)"},
+
+ {"_ZN1CpmEi", "C::operator->*(int)"},
// clang-format on
};
@@ -30293,7 +30295,7 @@ const unsigned NF = sizeof(fp_literal_cases) / sizeof(fp_literal_cases[0]);
const unsigned NEF = sizeof(fp_literal_cases[0].expecting) /
sizeof(fp_literal_cases[0].expecting[0]);
-const char *invalid_cases[] = {
+const char* invalid_cases[] = {
// clang-format off
"_ZIPPreEncode",
"Agentt",
@@ -30351,6 +30353,8 @@ const char *invalid_cases[] = {
"_ZGI3Foo",
"_ZGIW3Foov",
"W1x",
+ "_ZN1CdtEi",
+ "_ZN1CdsEi",
// clang-format on
};
diff --git a/libunwind/CMakeLists.txt b/libunwind/CMakeLists.txt
index 3c8499f..e27f3c2 100644
--- a/libunwind/CMakeLists.txt
+++ b/libunwind/CMakeLists.txt
@@ -145,6 +145,7 @@ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)
if(LIBUNWIND_LIBDIR_SUBDIR)
string(APPEND LIBUNWIND_TARGET_SUBDIR /${LIBUNWIND_LIBDIR_SUBDIR})
endif()
+ cmake_path(NORMAL_PATH LIBUNWIND_TARGET_SUBDIR)
set(LIBUNWIND_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LIBUNWIND_TARGET_SUBDIR})
set(LIBUNWIND_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX}/${LIBUNWIND_TARGET_SUBDIR} CACHE STRING
"Path where built libunwind libraries should be installed.")
diff --git a/lldb/include/lldb/Symbol/CompilerType.h b/lldb/include/lldb/Symbol/CompilerType.h
index fdbc2057..b8badfd 100644
--- a/lldb/include/lldb/Symbol/CompilerType.h
+++ b/lldb/include/lldb/Symbol/CompilerType.h
@@ -433,6 +433,11 @@ public:
CompilerDecl GetStaticFieldWithName(llvm::StringRef name) const;
+ llvm::Expected<CompilerType>
+ GetDereferencedType(ExecutionContext *exe_ctx, std::string &deref_name,
+ uint32_t &deref_byte_size, int32_t &deref_byte_offset,
+ ValueObject *valobj, uint64_t &language_flags) const;
+
llvm::Expected<CompilerType> GetChildCompilerTypeAtIndex(
ExecutionContext *exe_ctx, size_t idx, bool transparent_pointers,
bool omit_empty_base_classes, bool ignore_array_bounds,
diff --git a/lldb/include/lldb/Symbol/TypeSystem.h b/lldb/include/lldb/Symbol/TypeSystem.h
index df87fea..1f1a3ac 100644
--- a/lldb/include/lldb/Symbol/TypeSystem.h
+++ b/lldb/include/lldb/Symbol/TypeSystem.h
@@ -364,6 +364,12 @@ public:
return CompilerDecl();
}
+ virtual llvm::Expected<CompilerType>
+ GetDereferencedType(lldb::opaque_compiler_type_t type,
+ ExecutionContext *exe_ctx, std::string &deref_name,
+ uint32_t &deref_byte_size, int32_t &deref_byte_offset,
+ ValueObject *valobj, uint64_t &language_flags) = 0;
+
virtual llvm::Expected<CompilerType> GetChildCompilerTypeAtIndex(
lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
diff --git a/lldb/source/Host/windows/PipeWindows.cpp b/lldb/source/Host/windows/PipeWindows.cpp
index e3f5b62..30b9d1c 100644
--- a/lldb/source/Host/windows/PipeWindows.cpp
+++ b/lldb/source/Host/windows/PipeWindows.cpp
@@ -88,8 +88,9 @@ Status PipeWindows::CreateNew(llvm::StringRef name,
std::string pipe_path = g_pipe_name_prefix.str();
pipe_path.append(name.str());
- SECURITY_ATTRIBUTES sa{sizeof(SECURITY_ATTRIBUTES), 0,
- child_process_inherit ? TRUE : FALSE};
+ // We always create inheritable handles, but we won't pass them to a child
+ // process unless explicitly requested (cf. ProcessLauncherWindows.cpp).
+ SECURITY_ATTRIBUTES sa{sizeof(SECURITY_ATTRIBUTES), 0, TRUE};
// Always open for overlapped i/o. We implement blocking manually in Read
// and Write.
@@ -165,8 +166,9 @@ Status PipeWindows::OpenNamedPipe(llvm::StringRef name,
assert(is_read ? !CanRead() : !CanWrite());
- SECURITY_ATTRIBUTES attributes{sizeof(SECURITY_ATTRIBUTES), 0,
- child_process_inherit ? TRUE : FALSE};
+ // We always create inheritable handles, but we won't pass them to a child
+ // process unless explicitly requested (cf. ProcessLauncherWindows.cpp).
+ SECURITY_ATTRIBUTES attributes{sizeof(SECURITY_ATTRIBUTES), 0, TRUE};
std::string pipe_path = g_pipe_name_prefix.str();
pipe_path.append(name.str());
diff --git a/lldb/source/Host/windows/ProcessLauncherWindows.cpp b/lldb/source/Host/windows/ProcessLauncherWindows.cpp
index 065ba92..bc35667 100644
--- a/lldb/source/Host/windows/ProcessLauncherWindows.cpp
+++ b/lldb/source/Host/windows/ProcessLauncherWindows.cpp
@@ -10,6 +10,7 @@
#include "lldb/Host/HostProcess.h"
#include "lldb/Host/ProcessLaunchInfo.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Program.h"
@@ -65,14 +66,23 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
std::string executable;
std::vector<char> environment;
- STARTUPINFO startupinfo = {};
+ STARTUPINFOEX startupinfoex = {};
+ STARTUPINFO &startupinfo = startupinfoex.StartupInfo;
PROCESS_INFORMATION pi = {};
HANDLE stdin_handle = GetStdioHandle(launch_info, STDIN_FILENO);
HANDLE stdout_handle = GetStdioHandle(launch_info, STDOUT_FILENO);
HANDLE stderr_handle = GetStdioHandle(launch_info, STDERR_FILENO);
-
- startupinfo.cb = sizeof(startupinfo);
+ auto close_handles = llvm::make_scope_exit([&] {
+ if (stdin_handle)
+ ::CloseHandle(stdin_handle);
+ if (stdout_handle)
+ ::CloseHandle(stdout_handle);
+ if (stderr_handle)
+ ::CloseHandle(stderr_handle);
+ });
+
+ startupinfo.cb = sizeof(startupinfoex);
startupinfo.dwFlags |= STARTF_USESTDHANDLES;
startupinfo.hStdError =
stderr_handle ? stderr_handle : ::GetStdHandle(STD_ERROR_HANDLE);
@@ -81,6 +91,48 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
startupinfo.hStdOutput =
stdout_handle ? stdout_handle : ::GetStdHandle(STD_OUTPUT_HANDLE);
+ std::vector<HANDLE> inherited_handles;
+ if (startupinfo.hStdError)
+ inherited_handles.push_back(startupinfo.hStdError);
+ if (startupinfo.hStdInput)
+ inherited_handles.push_back(startupinfo.hStdInput);
+ if (startupinfo.hStdOutput)
+ inherited_handles.push_back(startupinfo.hStdOutput);
+
+ size_t attributelist_size = 0;
+ InitializeProcThreadAttributeList(/*lpAttributeList=*/nullptr,
+ /*dwAttributeCount=*/1, /*dwFlags=*/0,
+ &attributelist_size);
+
+ startupinfoex.lpAttributeList =
+ static_cast<LPPROC_THREAD_ATTRIBUTE_LIST>(malloc(attributelist_size));
+ auto free_attributelist =
+ llvm::make_scope_exit([&] { free(startupinfoex.lpAttributeList); });
+ if (!InitializeProcThreadAttributeList(startupinfoex.lpAttributeList,
+ /*dwAttributeCount=*/1, /*dwFlags=*/0,
+ &attributelist_size)) {
+ error = Status(::GetLastError(), eErrorTypeWin32);
+ return HostProcess();
+ }
+ auto delete_attributelist = llvm::make_scope_exit(
+ [&] { DeleteProcThreadAttributeList(startupinfoex.lpAttributeList); });
+ for (size_t i = 0; i < launch_info.GetNumFileActions(); ++i) {
+ const FileAction *act = launch_info.GetFileActionAtIndex(i);
+ if (act->GetAction() == FileAction::eFileActionDuplicate &&
+ act->GetFD() == act->GetActionArgument())
+ inherited_handles.push_back(reinterpret_cast<HANDLE>(act->GetFD()));
+ }
+ if (!inherited_handles.empty()) {
+ if (!UpdateProcThreadAttribute(
+ startupinfoex.lpAttributeList, /*dwFlags=*/0,
+ PROC_THREAD_ATTRIBUTE_HANDLE_LIST, inherited_handles.data(),
+ inherited_handles.size() * sizeof(HANDLE),
+ /*lpPreviousValue=*/nullptr, /*lpReturnSize=*/nullptr)) {
+ error = Status(::GetLastError(), eErrorTypeWin32);
+ return HostProcess();
+ }
+ }
+
const char *hide_console_var =
getenv("LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE");
if (hide_console_var &&
@@ -89,7 +141,8 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
startupinfo.wShowWindow = SW_HIDE;
}
- DWORD flags = CREATE_NEW_CONSOLE | CREATE_UNICODE_ENVIRONMENT;
+ DWORD flags = CREATE_NEW_CONSOLE | CREATE_UNICODE_ENVIRONMENT |
+ EXTENDED_STARTUPINFO_PRESENT;
if (launch_info.GetFlags().Test(eLaunchFlagDebug))
flags |= DEBUG_ONLY_THIS_PROCESS;
@@ -114,9 +167,10 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
WCHAR *pwcommandLine = wcommandLine.empty() ? nullptr : &wcommandLine[0];
BOOL result = ::CreateProcessW(
- wexecutable.c_str(), pwcommandLine, NULL, NULL, TRUE, flags, env_block,
+ wexecutable.c_str(), pwcommandLine, NULL, NULL,
+ /*bInheritHandles=*/!inherited_handles.empty(), flags, env_block,
wworkingDirectory.size() == 0 ? NULL : wworkingDirectory.c_str(),
- &startupinfo, &pi);
+ reinterpret_cast<STARTUPINFO *>(&startupinfoex), &pi);
if (!result) {
// Call GetLastError before we make any other system calls.
@@ -131,13 +185,6 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
::CloseHandle(pi.hThread);
}
- if (stdin_handle)
- ::CloseHandle(stdin_handle);
- if (stdout_handle)
- ::CloseHandle(stdout_handle);
- if (stderr_handle)
- ::CloseHandle(stderr_handle);
-
if (!result)
return HostProcess();
diff --git a/lldb/source/Plugins/ObjectFile/XCOFF/ObjectFileXCOFF.cpp b/lldb/source/Plugins/ObjectFile/XCOFF/ObjectFileXCOFF.cpp
index b54d43c5..1666677 100644
--- a/lldb/source/Plugins/ObjectFile/XCOFF/ObjectFileXCOFF.cpp
+++ b/lldb/source/Plugins/ObjectFile/XCOFF/ObjectFileXCOFF.cpp
@@ -190,7 +190,55 @@ void ObjectFileXCOFF::ParseSymtab(Symtab &lldb_symtab) {}
bool ObjectFileXCOFF::IsStripped() { return false; }
-void ObjectFileXCOFF::CreateSections(SectionList &unified_section_list) {}
+void ObjectFileXCOFF::CreateSections(SectionList &unified_section_list) {
+ if (m_sections_up)
+ return;
+
+ m_sections_up = std::make_unique<SectionList>();
+ ModuleSP module_sp(GetModule());
+
+ if (!module_sp)
+ return;
+
+ std::lock_guard<std::recursive_mutex> guard(module_sp->GetMutex());
+
+ int idx = 0;
+ for (const llvm::object::XCOFFSectionHeader64 &section :
+ m_binary->sections64()) {
+
+ ConstString const_sect_name(section.Name);
+
+ SectionType section_type = lldb::eSectionTypeOther;
+ if (section.Flags & XCOFF::STYP_TEXT)
+ section_type = eSectionTypeCode;
+ else if (section.Flags & XCOFF::STYP_DATA)
+ section_type = eSectionTypeData;
+ else if (section.Flags & XCOFF::STYP_BSS)
+ section_type = eSectionTypeZeroFill;
+ else if (section.Flags & XCOFF::STYP_DWARF) {
+ section_type = llvm::StringSwitch<SectionType>(section.Name)
+ .Case(".dwinfo", eSectionTypeDWARFDebugInfo)
+ .Case(".dwline", eSectionTypeDWARFDebugLine)
+ .Case(".dwabrev", eSectionTypeDWARFDebugAbbrev)
+ .Default(eSectionTypeInvalid);
+ }
+
+ SectionSP section_sp(new Section(
+ module_sp, this, ++idx, const_sect_name, section_type,
+ section.VirtualAddress, section.SectionSize,
+ section.FileOffsetToRawData, section.SectionSize, 0, section.Flags));
+
+ uint32_t permissions = ePermissionsReadable;
+ if (section.Flags & (XCOFF::STYP_DATA | XCOFF::STYP_BSS))
+ permissions |= ePermissionsWritable;
+ if (section.Flags & XCOFF::STYP_TEXT)
+ permissions |= ePermissionsExecutable;
+
+ section_sp->SetPermissions(permissions);
+ m_sections_up->AddSection(section_sp);
+ unified_section_list.AddSection(section_sp);
+ }
+}
void ObjectFileXCOFF::Dump(Stream *s) {}
diff --git a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp
index d8c7e43..332b925 100644
--- a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp
+++ b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp
@@ -924,9 +924,7 @@ Status GDBRemoteCommunication::StartDebugserverProcess(
debugserver_args.AppendArgument(fd_arg.GetString());
// Send "pass_comm_fd" down to the inferior so it can use it to
// communicate back with this process. Ignored on Windows.
-#ifndef _WIN32
launch_info.AppendDuplicateFileAction((int)pass_comm_fd, (int)pass_comm_fd);
-#endif
}
// use native registers, not the GDB registers
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 3b28688..28081e8 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -5355,33 +5355,9 @@ TypeSystemClang::GetNumChildren(lldb::opaque_compiler_type_t type,
assert(record_decl);
const clang::CXXRecordDecl *cxx_record_decl =
llvm::dyn_cast<clang::CXXRecordDecl>(record_decl);
- if (cxx_record_decl) {
- if (omit_empty_base_classes) {
- // Check each base classes to see if it or any of its base classes
- // contain any fields. This can help limit the noise in variable
- // views by not having to show base classes that contain no members.
- clang::CXXRecordDecl::base_class_const_iterator base_class,
- base_class_end;
- for (base_class = cxx_record_decl->bases_begin(),
- base_class_end = cxx_record_decl->bases_end();
- base_class != base_class_end; ++base_class) {
- const clang::CXXRecordDecl *base_class_decl =
- llvm::cast<clang::CXXRecordDecl>(
- base_class->getType()
- ->getAs<clang::RecordType>()
- ->getDecl());
- // Skip empty base classes
- if (!TypeSystemClang::RecordHasFields(base_class_decl))
- continue;
-
- num_children++;
- }
- } else {
- // Include all base classes
- num_children += cxx_record_decl->getNumBases();
- }
- }
+ num_children +=
+ GetNumBaseClasses(cxx_record_decl, omit_empty_base_classes);
num_children += std::distance(record_decl->field_begin(),
record_decl->field_end());
} else
@@ -6184,6 +6160,24 @@ uint32_t TypeSystemClang::GetNumPointeeChildren(clang::QualType type) {
return 0;
}
+llvm::Expected<CompilerType> TypeSystemClang::GetDereferencedType(
+ lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx,
+ std::string &deref_name, uint32_t &deref_byte_size,
+ int32_t &deref_byte_offset, ValueObject *valobj, uint64_t &language_flags) {
+ bool type_valid = IsPointerOrReferenceType(type, nullptr) ||
+ IsArrayType(type, nullptr, nullptr, nullptr);
+ if (!type_valid)
+ return llvm::createStringError("not a pointer, reference or array type");
+ uint32_t child_bitfield_bit_size = 0;
+ uint32_t child_bitfield_bit_offset = 0;
+ bool child_is_base_class;
+ bool child_is_deref_of_parent;
+ return GetChildCompilerTypeAtIndex(
+ type, exe_ctx, 0, false, true, false, deref_name, deref_byte_size,
+ deref_byte_offset, child_bitfield_bit_size, child_bitfield_bit_offset,
+ child_is_base_class, child_is_deref_of_parent, valobj, language_flags);
+}
+
llvm::Expected<CompilerType> TypeSystemClang::GetChildCompilerTypeAtIndex(
lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 9393384..f918cb0 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -889,6 +889,12 @@ public:
static uint32_t GetNumPointeeChildren(clang::QualType type);
+ llvm::Expected<CompilerType>
+ GetDereferencedType(lldb::opaque_compiler_type_t type,
+ ExecutionContext *exe_ctx, std::string &deref_name,
+ uint32_t &deref_byte_size, int32_t &deref_byte_offset,
+ ValueObject *valobj, uint64_t &language_flags) override;
+
llvm::Expected<CompilerType> GetChildCompilerTypeAtIndex(
lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
diff --git a/lldb/source/Symbol/CompilerType.cpp b/lldb/source/Symbol/CompilerType.cpp
index 90c4dbf..dd81fc2 100644
--- a/lldb/source/Symbol/CompilerType.cpp
+++ b/lldb/source/Symbol/CompilerType.cpp
@@ -893,6 +893,18 @@ CompilerDecl CompilerType::GetStaticFieldWithName(llvm::StringRef name) const {
return CompilerDecl();
}
+llvm::Expected<CompilerType> CompilerType::GetDereferencedType(
+ ExecutionContext *exe_ctx, std::string &deref_name,
+ uint32_t &deref_byte_size, int32_t &deref_byte_offset, ValueObject *valobj,
+ uint64_t &language_flags) const {
+ if (IsValid())
+ if (auto type_system_sp = GetTypeSystem())
+ return type_system_sp->GetDereferencedType(
+ m_type, exe_ctx, deref_name, deref_byte_size, deref_byte_offset,
+ valobj, language_flags);
+ return CompilerType();
+}
+
llvm::Expected<CompilerType> CompilerType::GetChildCompilerTypeAtIndex(
ExecutionContext *exe_ctx, size_t idx, bool transparent_pointers,
bool omit_empty_base_classes, bool ignore_array_bounds,
diff --git a/lldb/source/Target/RegisterContextUnwind.cpp b/lldb/source/Target/RegisterContextUnwind.cpp
index 4c760b8..cf4b96c 100644
--- a/lldb/source/Target/RegisterContextUnwind.cpp
+++ b/lldb/source/Target/RegisterContextUnwind.cpp
@@ -248,6 +248,7 @@ void RegisterContextUnwind::InitializeZerothFrame() {
active_row =
m_full_unwind_plan_sp->GetRowForFunctionOffset(m_current_offset);
row_register_kind = m_full_unwind_plan_sp->GetRegisterKind();
+ PropagateTrapHandlerFlagFromUnwindPlan(m_full_unwind_plan_sp);
if (active_row && log) {
StreamString active_row_strm;
active_row->Dump(active_row_strm, m_full_unwind_plan_sp.get(), &m_thread,
@@ -1375,6 +1376,7 @@ RegisterContextUnwind::SavedLocationForRegister(
}
}
+ // Check if the active_row has a register location listed.
if (regnum.IsValid() && active_row &&
active_row->GetRegisterInfo(regnum.GetAsKind(unwindplan_registerkind),
unwindplan_regloc)) {
@@ -1388,11 +1390,10 @@ RegisterContextUnwind::SavedLocationForRegister(
// This is frame 0 and we're retrieving the PC and it's saved in a Return
// Address register and it hasn't been saved anywhere yet -- that is,
// it's still live in the actual register. Handle this specially.
-
if (!have_unwindplan_regloc && return_address_reg.IsValid() &&
- IsFrameZero()) {
- if (return_address_reg.GetAsKind(eRegisterKindLLDB) !=
- LLDB_INVALID_REGNUM) {
+ return_address_reg.GetAsKind(eRegisterKindLLDB) !=
+ LLDB_INVALID_REGNUM) {
+ if (IsFrameZero()) {
lldb_private::UnwindLLDB::ConcreteRegisterLocation new_regloc;
new_regloc.type = UnwindLLDB::ConcreteRegisterLocation::
eRegisterInLiveRegisterContext;
@@ -1406,6 +1407,17 @@ RegisterContextUnwind::SavedLocationForRegister(
return_address_reg.GetAsKind(eRegisterKindLLDB),
return_address_reg.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterFound;
+ } else if (BehavesLikeZerothFrame()) {
+ // This function was interrupted asynchronously -- it faulted,
+ // an async interrupt, a timer fired, a debugger expression etc.
+ // The caller's pc is in the Return Address register, but the
+ // UnwindPlan for this function may have no location rule for
+ // the RA reg.
+ // This means that the caller's return address is in the RA reg
+ // when the function was interrupted--descend down one stack frame
+ // to retrieve it from the trap handler's saved context.
+ unwindplan_regloc.SetSame();
+ have_unwindplan_regloc = true;
}
}
@@ -1922,6 +1934,7 @@ void RegisterContextUnwind::PropagateTrapHandlerFlagFromUnwindPlan(
}
m_frame_type = eTrapHandlerFrame;
+ UnwindLogMsg("This frame is marked as a trap handler via its UnwindPlan");
if (m_current_offset_backed_up_one != m_current_offset) {
// We backed up the pc by 1 to compute the symbol context, but
diff --git a/lldb/source/ValueObject/ValueObject.cpp b/lldb/source/ValueObject/ValueObject.cpp
index e1c6676..46426ae 100644
--- a/lldb/source/ValueObject/ValueObject.cpp
+++ b/lldb/source/ValueObject/ValueObject.cpp
@@ -2794,75 +2794,62 @@ ValueObjectSP ValueObject::Dereference(Status &error) {
if (m_deref_valobj)
return m_deref_valobj->GetSP();
- const bool is_pointer_or_reference_type = IsPointerOrReferenceType();
- if (is_pointer_or_reference_type) {
- bool omit_empty_base_classes = true;
- bool ignore_array_bounds = false;
-
- std::string child_name_str;
- uint32_t child_byte_size = 0;
- int32_t child_byte_offset = 0;
- uint32_t child_bitfield_bit_size = 0;
- uint32_t child_bitfield_bit_offset = 0;
- bool child_is_base_class = false;
- bool child_is_deref_of_parent = false;
- const bool transparent_pointers = false;
- CompilerType compiler_type = GetCompilerType();
- uint64_t language_flags = 0;
+ std::string deref_name_str;
+ uint32_t deref_byte_size = 0;
+ int32_t deref_byte_offset = 0;
+ CompilerType compiler_type = GetCompilerType();
+ uint64_t language_flags = 0;
- ExecutionContext exe_ctx(GetExecutionContextRef());
+ ExecutionContext exe_ctx(GetExecutionContextRef());
- CompilerType child_compiler_type;
- auto child_compiler_type_or_err = compiler_type.GetChildCompilerTypeAtIndex(
- &exe_ctx, 0, transparent_pointers, omit_empty_base_classes,
- ignore_array_bounds, child_name_str, child_byte_size, child_byte_offset,
- child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class,
- child_is_deref_of_parent, this, language_flags);
- if (!child_compiler_type_or_err)
- LLDB_LOG_ERROR(GetLog(LLDBLog::Types),
- child_compiler_type_or_err.takeError(),
- "could not find child: {0}");
- else
- child_compiler_type = *child_compiler_type_or_err;
-
- if (child_compiler_type && child_byte_size) {
- ConstString child_name;
- if (!child_name_str.empty())
- child_name.SetCString(child_name_str.c_str());
-
- m_deref_valobj = new ValueObjectChild(
- *this, child_compiler_type, child_name, child_byte_size,
- child_byte_offset, child_bitfield_bit_size, child_bitfield_bit_offset,
- child_is_base_class, child_is_deref_of_parent, eAddressTypeInvalid,
- language_flags);
- }
+ CompilerType deref_compiler_type;
+ auto deref_compiler_type_or_err = compiler_type.GetDereferencedType(
+ &exe_ctx, deref_name_str, deref_byte_size, deref_byte_offset, this,
+ language_flags);
- // In case of incomplete child compiler type, use the pointee type and try
- // to recreate a new ValueObjectChild using it.
- if (!m_deref_valobj) {
- // FIXME(#59012): C++ stdlib formatters break with incomplete types (e.g.
- // `std::vector<int> &`). Remove ObjC restriction once that's resolved.
- if (Language::LanguageIsObjC(GetPreferredDisplayLanguage()) &&
- HasSyntheticValue()) {
- child_compiler_type = compiler_type.GetPointeeType();
-
- if (child_compiler_type) {
- ConstString child_name;
- if (!child_name_str.empty())
- child_name.SetCString(child_name_str.c_str());
-
- m_deref_valobj = new ValueObjectChild(
- *this, child_compiler_type, child_name, child_byte_size,
- child_byte_offset, child_bitfield_bit_size,
- child_bitfield_bit_offset, child_is_base_class,
- child_is_deref_of_parent, eAddressTypeInvalid, language_flags);
- }
+ std::string deref_error;
+ if (deref_compiler_type_or_err) {
+ deref_compiler_type = *deref_compiler_type_or_err;
+ } else {
+ deref_error = llvm::toString(deref_compiler_type_or_err.takeError());
+ LLDB_LOG(GetLog(LLDBLog::Types), "could not find child: {0}", deref_error);
+ }
+
+ if (deref_compiler_type && deref_byte_size) {
+ ConstString deref_name;
+ if (!deref_name_str.empty())
+ deref_name.SetCString(deref_name_str.c_str());
+
+ m_deref_valobj =
+ new ValueObjectChild(*this, deref_compiler_type, deref_name,
+ deref_byte_size, deref_byte_offset, 0, 0, false,
+ true, eAddressTypeInvalid, language_flags);
+ }
+
+ // In case of incomplete deref compiler type, use the pointee type and try
+ // to recreate a new ValueObjectChild using it.
+ if (!m_deref_valobj) {
+ // FIXME(#59012): C++ stdlib formatters break with incomplete types (e.g.
+ // `std::vector<int> &`). Remove ObjC restriction once that's resolved.
+ if (Language::LanguageIsObjC(GetPreferredDisplayLanguage()) &&
+ HasSyntheticValue()) {
+ deref_compiler_type = compiler_type.GetPointeeType();
+
+ if (deref_compiler_type) {
+ ConstString deref_name;
+ if (!deref_name_str.empty())
+ deref_name.SetCString(deref_name_str.c_str());
+
+ m_deref_valobj = new ValueObjectChild(
+ *this, deref_compiler_type, deref_name, deref_byte_size,
+ deref_byte_offset, 0, 0, false, true, eAddressTypeInvalid,
+ language_flags);
}
}
+ }
- } else if (IsSynthetic()) {
+ if (!m_deref_valobj && IsSynthetic())
m_deref_valobj = GetChildMemberWithName("$$dereference$$").get();
- }
if (m_deref_valobj) {
error.Clear();
@@ -2871,13 +2858,13 @@ ValueObjectSP ValueObject::Dereference(Status &error) {
StreamString strm;
GetExpressionPath(strm);
- if (is_pointer_or_reference_type)
+ if (deref_error.empty())
error = Status::FromErrorStringWithFormat(
"dereference failed: (%s) %s",
GetTypeName().AsCString("<invalid type>"), strm.GetData());
else
error = Status::FromErrorStringWithFormat(
- "not a pointer or reference type: (%s) %s",
+ "dereference failed: %s: (%s) %s", deref_error.c_str(),
GetTypeName().AsCString("<invalid type>"), strm.GetData());
return ValueObjectSP();
}
diff --git a/lldb/test/API/commands/frame/var-dil/basics/PointerArithmetic/TestFrameVarDILPointerArithmetic.py b/lldb/test/API/commands/frame/var-dil/basics/PointerArithmetic/TestFrameVarDILPointerArithmetic.py
index d36c5fc..6753f98 100644
--- a/lldb/test/API/commands/frame/var-dil/basics/PointerArithmetic/TestFrameVarDILPointerArithmetic.py
+++ b/lldb/test/API/commands/frame/var-dil/basics/PointerArithmetic/TestFrameVarDILPointerArithmetic.py
@@ -33,11 +33,7 @@ class TestFrameVarDILGlobalVariableLookup(TestBase):
self.expect_var_path("*offset_pref", True, type="int *")
self.expect_var_path("**pp_int0", value="0")
self.expect_var_path("&**pp_int0", type="int *")
- self.expect(
- "frame var '*array'",
- error=True,
- substrs=["not a pointer or reference type"],
- )
+ self.expect_var_path("*array", value="0")
self.expect(
"frame var '&*p_null'",
error=True,
diff --git a/lldb/test/API/functionalities/completion/TestCompletion.py b/lldb/test/API/functionalities/completion/TestCompletion.py
index bf043c7..e7c5372 100644
--- a/lldb/test/API/functionalities/completion/TestCompletion.py
+++ b/lldb/test/API/functionalities/completion/TestCompletion.py
@@ -334,22 +334,22 @@ class CommandLineCompletionTestCase(TestBase):
"settings replace target.ru", "settings replace target.run-args"
)
- def test_settings_show_term(self):
+ def test_settings_show_term_width(self):
self.complete_from_to("settings show term-w", "settings show term-width")
- def test_settings_list_term(self):
+ def test_settings_list_term_width(self):
self.complete_from_to("settings list term-w", "settings list term-width")
- def test_settings_show_term(self):
+ def test_settings_show_term_height(self):
self.complete_from_to("settings show term-h", "settings show term-height")
- def test_settings_list_term(self):
+ def test_settings_list_term_height(self):
self.complete_from_to("settings list term-h", "settings list term-height")
- def test_settings_remove_term(self):
+ def test_settings_remove_term_width(self):
self.complete_from_to("settings remove term-w", "settings remove term-width")
- def test_settings_remove_term(self):
+ def test_settings_remove_term_height(self):
self.complete_from_to("settings remove term-h", "settings remove term-height")
def test_settings_s(self):
diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/optional/TestDataFormatterGenericOptional.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/optional/TestDataFormatterGenericOptional.py
index 7dc656a..99d79a9 100644
--- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/optional/TestDataFormatterGenericOptional.py
+++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/optional/TestDataFormatterGenericOptional.py
@@ -88,7 +88,7 @@ class GenericOptionalDataFormatterTestCase(TestBase):
self.expect(
"frame variable *number_not_engaged",
error=True,
- substrs=["not a pointer or reference type"],
+ substrs=["dereference failed: not a pointer, reference or array type"],
)
@add_test_categories(["libc++"])
diff --git a/lldb/test/API/functionalities/unwind/frameless-faulted/Makefile b/lldb/test/API/functionalities/unwind/frameless-faulted/Makefile
new file mode 100644
index 0000000..954c184
--- /dev/null
+++ b/lldb/test/API/functionalities/unwind/frameless-faulted/Makefile
@@ -0,0 +1,13 @@
+C_SOURCES := main.c
+
+interrupt-and-trap-funcs.o: interrupt-and-trap-funcs.s
+ $(CC) $(CFLAGS) -E -o interrupt-and-trap-funcs.s $(SRCDIR)/interrupt-and-trap-funcs.s
+ $(CC) $(CFLAGS) -c -o interrupt-and-trap-funcs.o interrupt-and-trap-funcs.s
+
+include Makefile.rules
+
+a.out: interrupt-and-trap-funcs.o
+
+# Needs to come after include
+OBJECTS += interrupt-and-trap-funcs.o
+
diff --git a/lldb/test/API/functionalities/unwind/frameless-faulted/TestUnwindFramelessFaulted.py b/lldb/test/API/functionalities/unwind/frameless-faulted/TestUnwindFramelessFaulted.py
new file mode 100644
index 0000000..483a487
--- /dev/null
+++ b/lldb/test/API/functionalities/unwind/frameless-faulted/TestUnwindFramelessFaulted.py
@@ -0,0 +1,128 @@
+"""Test that lldb correctly backtraces a frameless function that faults."""
+
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test.decorators import *
+import shutil
+import os
+
+
+class TestUnwindFramelessFaulted(TestBase):
+ NO_DEBUG_INFO_TESTCASE = True
+
+ @skipIf(oslist=no_match([lldbplatformutil.getDarwinOSTriples()]))
+ @skipIf(archs=no_match(["aarch64", "arm64", "arm64e"]))
+
+ # The static linker in Xcode 15.0-15.2 on macOS 14 will mislink
+ # the eh_frame addresses; ld-classic in those tools is one workaround.
+ # This issue was fixed in Xcode 15.3, but it's not straightforward
+ # to test for the linker version or Xcode version so tie this to
+ # macOS 15 which uses Xcode 16 and does not have the issues.
+ @skipIf(macos_version=["<", "15.0"])
+
+ def test_frameless_faulted_unwind(self):
+ self.build()
+
+ (target, process, thread, bp) = lldbutil.run_to_name_breakpoint(
+ self, "main", only_one_thread=False
+ )
+
+ # The test program will have a backtrace like this at its deepest:
+ #
+ # * frame #0: 0x0000000102adc468 a.out`break_to_debugger + 4
+ # frame #1: 0x0000000102adc458 a.out`trap + 16
+ # frame #2: 0x0000000102adc440 a.out`to_be_interrupted + 20
+ # frame #3: 0x0000000102adc418 a.out`main at main.c:4:7
+ # frame #4: 0x0000000193b7eb4c dyld`start + 6000
+
+ correct_frames = ["break_to_debugger", "trap", "to_be_interrupted", "main"]
+
+ # Keep track of when main has branch & linked, instruction step until we're
+ # back in main()
+ main_has_bl_ed = False
+
+ # Instruction step through the binary until we are in a function not
+ # listed in correct_frames.
+ frame = thread.GetFrameAtIndex(0)
+ step_count = 0
+ max_step_count = 200
+ while (
+ process.GetState() == lldb.eStateStopped
+ and frame.name in correct_frames
+ and step_count < max_step_count
+ ):
+ starting_index = 0
+ if self.TraceOn():
+ self.runCmd("bt")
+
+ # Find which index into correct_frames the current stack frame is
+ for idx, name in enumerate(correct_frames):
+ if frame.name == name:
+ starting_index = idx
+
+ # Test that all frames after the current frame listed in
+ # correct_frames appears in the backtrace.
+ frame_idx = 0
+ for expected_frame in correct_frames[starting_index:]:
+ self.assertEqual(thread.GetFrameAtIndex(frame_idx).name, expected_frame)
+ frame_idx = frame_idx + 1
+
+ # When we're at our deepest level, test that register passing of
+ # x0 and x20 follow the by-hand UnwindPlan rules.
+ # In this test program, we can get x0 in the middle of the stack
+ # and we CAN'T get x20. The opposites of the normal AArch64 SysV
+ # ABI.
+ if frame.name == "break_to_debugger":
+ tbi_frame = thread.GetFrameAtIndex(2)
+ self.assertEqual(tbi_frame.name, "to_be_interrupted")
+ # The original argument to to_be_interrupted(), 10
+ # Normally can't get x0 mid-stack, but UnwindPlans have
+ # special rules to make this possible.
+ x0_reg = tbi_frame.register["x0"]
+ self.assertTrue(x0_reg.IsValid())
+ self.assertEqual(x0_reg.GetValueAsUnsigned(), 10)
+ # The incremented return value from to_be_interrupted(), 11
+ x24_reg = tbi_frame.register["x24"]
+ self.assertTrue(x24_reg.IsValid())
+ self.assertEqual(x24_reg.GetValueAsUnsigned(), 11)
+ # x20 can normally be fetched mid-stack, but the UnwindPlan
+ # has a rule saying it can't be fetched.
+ x20_reg = tbi_frame.register["x20"]
+ self.assertTrue(x20_reg.error.fail)
+
+ trap_frame = thread.GetFrameAtIndex(1)
+ self.assertEqual(trap_frame.name, "trap")
+ # Confirm that we can fetch x0 from trap() which
+            # is normally not possible w/ SysV ABI, but special
+ # UnwindPlans in use.
+ x0_reg = trap_frame.register["x0"]
+ self.assertTrue(x0_reg.IsValid())
+ self.assertEqual(x0_reg.GetValueAsUnsigned(), 10)
+ x1_reg = trap_frame.register["x1"]
+ self.assertTrue(x1_reg.error.fail)
+
+ main_frame = thread.GetFrameAtIndex(3)
+ self.assertEqual(main_frame.name, "main")
+ # x20 can normally be fetched mid-stack, but the UnwindPlan
+ # has a rule saying it can't be fetched.
+ x20_reg = main_frame.register["x20"]
+ self.assertTrue(x20_reg.error.fail)
+ # x21 can be fetched mid-stack.
+ x21_reg = main_frame.register["x21"]
+ self.assertTrue(x21_reg.error.success)
+
+ # manually move past the BRK instruction in
+ # break_to_debugger(). lldb-server doesn't
+ # advance past the builtin_debugtrap() BRK
+ # instruction.
+ if (
+ thread.GetStopReason() == lldb.eStopReasonException
+ and frame.name == "break_to_debugger"
+ ):
+ frame.SetPC(frame.GetPC() + 4)
+
+ if self.TraceOn():
+ print("StepInstruction")
+ thread.StepInstruction(False)
+ frame = thread.GetFrameAtIndex(0)
+ step_count = step_count + 1
diff --git a/lldb/test/API/functionalities/unwind/frameless-faulted/interrupt-and-trap-funcs.s b/lldb/test/API/functionalities/unwind/frameless-faulted/interrupt-and-trap-funcs.s
new file mode 100644
index 0000000..13bb473
--- /dev/null
+++ b/lldb/test/API/functionalities/unwind/frameless-faulted/interrupt-and-trap-funcs.s
@@ -0,0 +1,135 @@
+// This is assembly code that needs to be run
+// through the C preprocessor before being assembled;
+// the Makefile invokes $(CC) -E on it first.
+//
+// clang-format off
+
+
+#define DW_CFA_register 0x9
+#define ehframe_x0 0
+#define ehframe_x20 20
+#define ehframe_x22 22
+#define ehframe_x23 23
+#define ehframe_pc 32
+
+#if defined(__APPLE__)
+#define TO_BE_INTERRUPTED _to_be_interrupted
+#define TRAP _trap
+#define BREAK_TO_DEBUGGER _break_to_debugger
+#else
+#define TO_BE_INTERRUPTED to_be_interrupted
+#define TRAP trap
+#define BREAK_TO_DEBUGGER break_to_debugger
+#endif
+
+ .text
+//--------------------------------------
+// to_be_interrupted() a frameless function that does a non-ABI
+// function call to trap(), simulating an async signal/interrupt/exception/fault.
+// Before it branches to trap(), put the return address in x23.
+// trap() knows to branch back to $x23 when it has finished.
+//--------------------------------------
+ .globl TO_BE_INTERRUPTED
+#if defined(__APPLE__)
+ .p2align 2
+#endif
+TO_BE_INTERRUPTED:
+ .cfi_startproc
+
+ // This is a garbage entry to ensure that eh_frame is emitted.
+ // If there's no eh_frame, lldb can use the assembly emulation scan,
+ // which always includes a rule for $lr, and we won't replicate the
+ // bug we're testing for.
+ .cfi_escape DW_CFA_register, ehframe_x22, ehframe_x23
+ mov x24, x0
+ add x24, x24, #1
+
+#if defined(__APPLE__)
+ adrp x23, L_.return@PAGE // put return address in x23
+ add x23, x23, L_.return@PAGEOFF
+#else
+ adrp x23, .L.return
+ add x23, x23, :lo12:.L.return
+#endif
+
+ b TRAP // branch to trap handler, fake async interrupt
+
+#if defined(__APPLE__)
+L_.return:
+#else
+.L.return:
+#endif
+ mov x0, x24
+ ret
+ .cfi_endproc
+
+
+
+//--------------------------------------
+// trap() trap handler function, sets up stack frame
+// with special unwind rule for the pc value of the
+// "interrupted" stack frame (it's in x23), then calls
+// break_to_debugger().
+//--------------------------------------
+ .globl TRAP
+#if defined(__APPLE__)
+ .p2align 2
+#endif
+TRAP:
+ .cfi_startproc
+ .cfi_signal_frame
+
+ // The pc value when we were interrupted is in x23
+ .cfi_escape DW_CFA_register, ehframe_pc, ehframe_x23
+
+ // For fun, mark x0 as unmodified so the caller can
+ // retrieve the value if it wants.
+ .cfi_same_value ehframe_x0
+
+ // Mark x20 as undefined. This is a callee-preserved
+ // (non-volatile) register by the SysV AArch64 ABI, but
+ // it'll be fun to see lldb not passing a value past this
+ // point on the stack.
+ .cfi_undefined ehframe_x20
+
+ // standard prologue save of fp & lr so we can call
+ // break_to_debugger()
+ sub sp, sp, #32
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16
+ .cfi_def_cfa w29, 16
+ .cfi_offset w30, -8
+ .cfi_offset w29, -16
+
+ bl BREAK_TO_DEBUGGER
+
+ ldp x29, x30, [sp, #16]
+ .cfi_same_value x29
+ .cfi_same_value x30
+ .cfi_def_cfa sp, 32
+ add sp, sp, #32
+ .cfi_same_value sp
+ .cfi_def_cfa sp, 0
+
+ // jump back to $x23 to resume execution of to_be_interrupted
+ br x23
+ .cfi_endproc
+
+//--------------------------------------
+// break_to_debugger() executes a BRK instruction
+//--------------------------------------
+ .globl BREAK_TO_DEBUGGER
+#if defined(__APPLE__)
+ .p2align 2
+#endif
+BREAK_TO_DEBUGGER:
+ .cfi_startproc
+
+ // For fun, mark x0 as unmodified so the caller can
+ // retrieve the value if it wants.
+ .cfi_same_value ehframe_x0
+
+ brk #0xf000 // __builtin_debugtrap aarch64 instruction
+
+ ret
+ .cfi_endproc
diff --git a/lldb/test/API/functionalities/unwind/frameless-faulted/main.c b/lldb/test/API/functionalities/unwind/frameless-faulted/main.c
new file mode 100644
index 0000000..e5f690a
--- /dev/null
+++ b/lldb/test/API/functionalities/unwind/frameless-faulted/main.c
@@ -0,0 +1,7 @@
+int to_be_interrupted(int);
+
+int main() {
+ int c = 10;
+ c = to_be_interrupted(c);
+ return c;
+}
diff --git a/lldb/test/Shell/ObjectFile/XCOFF/basic-info.yaml b/lldb/test/Shell/ObjectFile/XCOFF/basic-info.yaml
index 3c0037db..17ff2f3 100644
--- a/lldb/test/Shell/ObjectFile/XCOFF/basic-info.yaml
+++ b/lldb/test/Shell/ObjectFile/XCOFF/basic-info.yaml
@@ -7,11 +7,32 @@
# CHECK: Stripped: false
# CHECK: Type: executable
# CHECK: Strata: unknown
+# CHECK: Name: .text
+# CHECK-NEXT: Type: code
+# CHECK-NEXT: Permissions: r-x
+# CHECK: Name: .data
+# CHECK-NEXT: Type: data
+# CHECK-NEXT: Permissions: rw-
+# CHECK: Name: .bss
+# CHECK-NEXT: Type: zero-fill
+# CHECK-NEXT: Permissions: rw-
+# CHECK: Name: .loader
+# CHECK-NEXT: Type: regular
+# CHECK-NEXT: Permissions: r--
+# CHECK: Name: .dwline
+# CHECK-NEXT: Type: dwarf-line
+# CHECK-NEXT: Permissions: r--
+# CHECK: Name: .dwinfo
+# CHECK-NEXT: Type: dwarf-info
+# CHECK-NEXT: Permissions: r--
+# CHECK: Name: .dwabrev
+# CHECK-NEXT: Type: dwarf-abbrev
+# CHECK-NEXT: Permissions: r--
--- !XCOFF
FileHeader:
MagicNumber: 0x1F7
- NumberOfSections: 1
+ NumberOfSections: 7
CreationTime: 000000000
Flags: 0x0002
Sections:
@@ -22,6 +43,66 @@ Sections:
FileOffsetToLineNumbers: 0x0
NumberOfLineNumbers: 0x0
Flags: [ STYP_TEXT ]
- SectionData: E8C20000E94204
+ SectionData: E8C20000
+ - Name: .data
+ Address: 0x1100008D2
+ Size: 0x2AE
+ FileOffsetToData: 0x8D2
+ FileOffsetToRelocations: 0x132E
+ FileOffsetToLineNumbers: 0x0
+ NumberOfRelocations: 0x22
+ NumberOfLineNumbers: 0x0
+ Flags: [ STYP_DATA ]
+ SectionData: ''
+ - Name: .bss
+ Address: 0x110000B80
+ Size: 0x28
+ FileOffsetToData: 0x0
+ FileOffsetToRelocations: 0x0
+ FileOffsetToLineNumbers: 0x0
+ NumberOfRelocations: 0x0
+ NumberOfLineNumbers: 0x0
+ Flags: [ STYP_BSS ]
+ SectionData: ''
+ - Name: .loader
+ Address: 0x0
+ Size: 0x413
+ FileOffsetToData: 0xB80
+ FileOffsetToRelocations: 0x0
+ FileOffsetToLineNumbers: 0x0
+ NumberOfRelocations: 0x0
+ NumberOfLineNumbers: 0x0
+ Flags: [ STYP_LOADER ]
+ SectionData: 00000001
+ - Name: .dwline
+ Address: 0x0
+ Size: 0x9C
+ FileOffsetToData: 0xF94
+ FileOffsetToRelocations: 0x150A
+ FileOffsetToLineNumbers: 0x0
+ NumberOfRelocations: 0x5
+ NumberOfLineNumbers: 0x0
+ Flags: [ STYP_DWARF ]
+ SectionData: FFFFFFFF
+ - Name: .dwinfo
+ Address: 0x0
+ Size: 0xDD
+ FileOffsetToData: 0x1030
+ FileOffsetToRelocations: 0x1550
+ FileOffsetToLineNumbers: 0x0
+ NumberOfRelocations: 0x6
+ NumberOfLineNumbers: 0x0
+ Flags: [ STYP_DWARF ]
+ SectionData: FFFFFFFF
+ - Name: .dwabrev
+ Address: 0x0
+ Size: 0x43
+ FileOffsetToData: 0x110E
+ FileOffsetToRelocations: 0x0
+ FileOffsetToLineNumbers: 0x0
+ NumberOfRelocations: 0x0
+ NumberOfLineNumbers: 0x0
+ Flags: [ STYP_DWARF ]
+ SectionData: 01110125
StringTable: {}
...
diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt
index a9dc190..5dedee8 100644
--- a/lldb/tools/lldb-dap/CMakeLists.txt
+++ b/lldb/tools/lldb-dap/CMakeLists.txt
@@ -1,20 +1,11 @@
-if(APPLE)
- configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/lldb-dap-Info.plist.in
- ${CMAKE_CURRENT_BINARY_DIR}/lldb-dap-Info.plist
- )
- # Inline info plist in binary (use target_link_options for this as soon as CMake 3.13 is available)
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-sectcreate,__TEXT,__info_plist,${CMAKE_CURRENT_BINARY_DIR}/lldb-dap-Info.plist")
-endif()
-
# We need to include the llvm components we depend on manually, as liblldb does
# not re-export those.
set(LLVM_LINK_COMPONENTS Support)
set(LLVM_TARGET_DEFINITIONS Options.td)
tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(LLDBDAPOptionsTableGen)
-add_lldb_tool(lldb-dap
- lldb-dap.cpp
+
+add_lldb_library(lldbDAP
Breakpoint.cpp
BreakpointBase.cpp
DAP.cpp
@@ -85,23 +76,13 @@ add_lldb_tool(lldb-dap
Support
)
-target_include_directories(lldb-dap PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories(lldbDAP
+ PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
if(LLDB_DAP_WELCOME_MESSAGE)
- target_compile_definitions(lldb-dap
+ target_compile_definitions(lldbDAP
PRIVATE
-DLLDB_DAP_WELCOME_MESSAGE=\"${LLDB_DAP_WELCOME_MESSAGE}\")
endif()
-if(LLDB_BUILD_FRAMEWORK)
- # In the build-tree, we know the exact path to the framework directory.
- # The installed framework can be in different locations.
- lldb_setup_rpaths(lldb-dap
- BUILD_RPATH
- "${LLDB_FRAMEWORK_ABSOLUTE_BUILD_DIR}"
- INSTALL_RPATH
- "@loader_path/../../../SharedFrameworks"
- "@loader_path/../../System/Library/PrivateFrameworks"
- "@loader_path/../../Library/PrivateFrameworks"
- )
-endif()
+add_subdirectory(tool)
diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp
index bd2e663..279e6d3 100644
--- a/lldb/tools/lldb-dap/JSONUtils.cpp
+++ b/lldb/tools/lldb-dap/JSONUtils.cpp
@@ -99,19 +99,6 @@ std::optional<bool> GetBoolean(const llvm::json::Object *obj,
return std::nullopt;
}
-std::optional<int64_t> GetSigned(const llvm::json::Object &obj,
- llvm::StringRef key) {
- return obj.getInteger(key);
-}
-
-std::optional<int64_t> GetSigned(const llvm::json::Object *obj,
- llvm::StringRef key) {
- if (obj == nullptr)
- return std::nullopt;
-
- return GetSigned(*obj, key);
-}
-
bool ObjectContainsKey(const llvm::json::Object &obj, llvm::StringRef key) {
return obj.find(key) != obj.end();
}
@@ -263,7 +250,7 @@ void FillResponse(const llvm::json::Object &request,
response.try_emplace("seq", (int64_t)0);
EmplaceSafeString(response, "command",
GetString(request, "command").value_or(""));
- const int64_t seq = GetSigned(request, "seq").value_or(0);
+ const uint64_t seq = GetInteger<uint64_t>(request, "seq").value_or(0);
response.try_emplace("request_seq", seq);
response.try_emplace("success", true);
}
diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp
index 2b74199..c9cab35 100644
--- a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp
+++ b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp
@@ -69,6 +69,16 @@ llvm::json::Value toJSON(const Source &S) {
return result;
}
+bool fromJSON(const llvm::json::Value &Params, ExceptionBreakpointsFilter &EBF,
+ llvm::json::Path P) {
+ json::ObjectMapper O(Params, P);
+ return O && O.map("filter", EBF.filter) && O.map("label", EBF.label) &&
+ O.mapOptional("description", EBF.description) &&
+ O.mapOptional("default", EBF.defaultState) &&
+ O.mapOptional("supportsCondition", EBF.supportsCondition) &&
+ O.mapOptional("conditionDescription", EBF.conditionDescription);
+}
+
json::Value toJSON(const ExceptionBreakpointsFilter &EBF) {
json::Object result{{"filter", EBF.filter}, {"label", EBF.label}};
diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h
index 1f0cb1e..d1e86b0 100644
--- a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h
+++ b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h
@@ -55,6 +55,8 @@ struct ExceptionBreakpointsFilter {
/// shown as the placeholder text for a text box and can be translated.
std::optional<std::string> conditionDescription;
};
+bool fromJSON(const llvm::json::Value &, ExceptionBreakpointsFilter &,
+ llvm::json::Path);
llvm::json::Value toJSON(const ExceptionBreakpointsFilter &);
enum ColumnType : unsigned {
diff --git a/lldb/tools/lldb-dap/tool/CMakeLists.txt b/lldb/tools/lldb-dap/tool/CMakeLists.txt
new file mode 100644
index 0000000..b39a4ed
--- /dev/null
+++ b/lldb/tools/lldb-dap/tool/CMakeLists.txt
@@ -0,0 +1,28 @@
+add_lldb_tool(lldb-dap
+ lldb-dap.cpp
+
+ LINK_LIBS
+ lldbDAP
+ )
+
+if(APPLE)
+ configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/lldb-dap-Info.plist.in
+ ${CMAKE_CURRENT_BINARY_DIR}/lldb-dap-Info.plist
+ )
+ target_link_options(lldb-dap
+ PRIVATE LINKER:-sectcreate,__TEXT,__info_plist,${CMAKE_CURRENT_BINARY_DIR}/lldb-dap-Info.plist)
+endif()
+
+if(LLDB_BUILD_FRAMEWORK)
+ # In the build-tree, we know the exact path to the framework directory.
+ # The installed framework can be in different locations.
+ lldb_setup_rpaths(lldb-dap
+ BUILD_RPATH
+ "${LLDB_FRAMEWORK_ABSOLUTE_BUILD_DIR}"
+ INSTALL_RPATH
+ "@loader_path/../../../SharedFrameworks"
+ "@loader_path/../../System/Library/PrivateFrameworks"
+ "@loader_path/../../Library/PrivateFrameworks"
+ )
+endif()
diff --git a/lldb/tools/lldb-dap/lldb-dap-Info.plist.in b/lldb/tools/lldb-dap/tool/lldb-dap-Info.plist.in
index 7d01d31..7d01d31 100644
--- a/lldb/tools/lldb-dap/lldb-dap-Info.plist.in
+++ b/lldb/tools/lldb-dap/tool/lldb-dap-Info.plist.in
diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/tool/lldb-dap.cpp
index 7a4cc70..7a4cc70 100644
--- a/lldb/tools/lldb-dap/lldb-dap.cpp
+++ b/lldb/tools/lldb-dap/tool/lldb-dap.cpp
diff --git a/lldb/tools/lldb-server/lldb-platform.cpp b/lldb/tools/lldb-server/lldb-platform.cpp
index 10d79c6..5b0a8ad 100644
--- a/lldb/tools/lldb-server/lldb-platform.cpp
+++ b/lldb/tools/lldb-server/lldb-platform.cpp
@@ -274,10 +274,8 @@ static Status spawn_process(const char *progname, const FileSpec &prog,
self_args.AppendArgument(llvm::StringRef("platform"));
self_args.AppendArgument(llvm::StringRef("--child-platform-fd"));
self_args.AppendArgument(llvm::to_string(shared_socket.GetSendableFD()));
-#ifndef _WIN32
launch_info.AppendDuplicateFileAction((int)shared_socket.GetSendableFD(),
(int)shared_socket.GetSendableFD());
-#endif
if (gdb_port) {
self_args.AppendArgument(llvm::StringRef("--gdbserver-port"));
self_args.AppendArgument(llvm::to_string(gdb_port));
diff --git a/lldb/unittests/CMakeLists.txt b/lldb/unittests/CMakeLists.txt
index cc9d45e..d8f9cc7 100644
--- a/lldb/unittests/CMakeLists.txt
+++ b/lldb/unittests/CMakeLists.txt
@@ -48,8 +48,9 @@ endfunction()
add_subdirectory(TestingSupport)
if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
- # FIXME: APITests.exe is not a valid googletest binary.
+ # FIXME: Tests linking against libLLDB don't work on Windows.
add_subdirectory(API)
+ add_subdirectory(DAP)
endif()
add_subdirectory(Breakpoint)
add_subdirectory(Callback)
diff --git a/lldb/unittests/DAP/CMakeLists.txt b/lldb/unittests/DAP/CMakeLists.txt
new file mode 100644
index 0000000..8b24065
--- /dev/null
+++ b/lldb/unittests/DAP/CMakeLists.txt
@@ -0,0 +1,11 @@
+add_lldb_unittest(DAPTests
+ JSONUtilsTest.cpp
+ LLDBUtilsTest.cpp
+ ProtocolTypesTest.cpp
+
+ LINK_LIBS
+ lldbDAP
+ LLVMTestingSupport
+ LINK_COMPONENTS
+ Support
+ )
diff --git a/lldb/unittests/DAP/JSONUtilsTest.cpp b/lldb/unittests/DAP/JSONUtilsTest.cpp
new file mode 100644
index 0000000..ce4be08
--- /dev/null
+++ b/lldb/unittests/DAP/JSONUtilsTest.cpp
@@ -0,0 +1,195 @@
+//===-- JSONUtilsTest.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "JSONUtils.h"
+#include "lldb/API/SBModule.h"
+#include "lldb/API/SBTarget.h"
+#include "llvm/Support/JSON.h"
+#include "gtest/gtest.h"
+#include <optional>
+
+using namespace llvm;
+using namespace lldb;
+using namespace lldb_dap;
+
+TEST(JSONUtilsTest, GetAsString) {
+ json::Value string_value("foo");
+ EXPECT_EQ(GetAsString(string_value), "foo");
+
+ json::Value int_value(42);
+ EXPECT_EQ(GetAsString(int_value), "");
+
+ json::Value null_value(nullptr);
+ EXPECT_EQ(GetAsString(null_value), "");
+}
+
+TEST(JSONUtilsTest, GetString_Ref) {
+ json::Object obj;
+ obj.try_emplace("key", "value");
+
+ auto result = GetString(obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), "value");
+
+ result = GetString(obj, "nonexistent_key");
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(JSONUtilsTest, GetString_Pointer) {
+ json::Object obj;
+ obj.try_emplace("key", "value");
+
+ auto result = GetString(&obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), "value");
+
+ result = GetString(nullptr, "key");
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(JSONUtilsTest, GetBoolean_Ref) {
+ json::Object obj;
+ obj.try_emplace("key_true", true);
+ obj.try_emplace("key_false", false);
+ obj.try_emplace("key_int", 1);
+
+ auto result = GetBoolean(obj, "key_true");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_TRUE(result.value());
+
+ result = GetBoolean(obj, "key_false");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_FALSE(result.value());
+
+ result = GetBoolean(obj, "key_int");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_TRUE(result.value());
+
+ result = GetBoolean(obj, "nonexistent_key");
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(JSONUtilsTest, GetBoolean_Pointer) {
+ json::Object obj;
+ obj.try_emplace("key", true);
+
+ auto result = GetBoolean(&obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_TRUE(result.value());
+
+ result = GetBoolean(nullptr, "key");
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(JSONUtilsTest, GetInteger_Ref) {
+ json::Object obj;
+ obj.try_emplace("key", 123);
+
+ auto result = GetInteger<int>(obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), 123);
+
+ result = GetInteger<int>(obj, "nonexistent_key");
+ EXPECT_FALSE(result.has_value());
+
+ obj.try_emplace("key_float", 123.45);
+ result = GetInteger<int>(obj, "key_float");
+ EXPECT_FALSE(result.has_value());
+
+ obj.try_emplace("key_string", "123");
+ result = GetInteger<int>(obj, "key_string");
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(JSONUtilsTest, GetInteger_Pointer) {
+ json::Object obj;
+ obj.try_emplace("key", 456);
+
+ auto result = GetInteger<int>(&obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), 456);
+
+ result = GetInteger<int>(nullptr, "key");
+ EXPECT_FALSE(result.has_value());
+
+ obj.try_emplace("key_invalid", "not_an_integer");
+ result = GetInteger<int>(&obj, "key_invalid");
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(JSONUtilsTest, GetInteger_DifferentTypes) {
+ json::Object obj;
+ obj.try_emplace("key", 789);
+
+ auto result = GetInteger<int64_t>(obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), 789);
+
+ result = GetInteger<uint32_t>(obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), 789U);
+
+ result = GetInteger<int16_t>(obj, "key");
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), static_cast<int16_t>(789));
+}
+
+TEST(JSONUtilsTest, CreateModule) {
+ SBTarget target;
+ SBModule module;
+
+ json::Value value = CreateModule(target, module);
+ json::Object *object = value.getAsObject();
+
+ ASSERT_NE(object, nullptr);
+ EXPECT_EQ(object->size(), 0UL);
+}
+
+TEST(JSONUtilsTest, GetStrings_EmptyArray) {
+ llvm::json::Object obj;
+ obj.try_emplace("key", llvm::json::Array());
+ auto result = GetStrings(&obj, "key");
+ EXPECT_TRUE(result.empty());
+}
+
+TEST(JSONUtilsTest, GetStrings_NullKey) {
+ llvm::json::Object obj;
+ auto result = GetStrings(&obj, "nonexistent_key");
+ EXPECT_TRUE(result.empty());
+}
+
+TEST(JSONUtilsTest, GetStrings_StringValues) {
+ llvm::json::Object obj;
+ llvm::json::Array arr{"value1", "value2", "value3"};
+ obj.try_emplace("key", std::move(arr));
+ auto result = GetStrings(&obj, "key");
+ ASSERT_EQ(result.size(), 3UL);
+ EXPECT_EQ(result[0], "value1");
+ EXPECT_EQ(result[1], "value2");
+ EXPECT_EQ(result[2], "value3");
+}
+
+TEST(JSONUtilsTest, GetStrings_MixedValues) {
+ llvm::json::Object obj;
+ llvm::json::Array arr{"string", 42, true, nullptr};
+ obj.try_emplace("key", std::move(arr));
+ auto result = GetStrings(&obj, "key");
+ ASSERT_EQ(result.size(), 3UL);
+ EXPECT_EQ(result[0], "string");
+ EXPECT_EQ(result[1], "42");
+ EXPECT_EQ(result[2], "true");
+}
+
+TEST(JSONUtilsTest, GetStrings_NestedArray) {
+ llvm::json::Object obj;
+ llvm::json::Array nested_array{"string", llvm::json::Array{"nested"}};
+ obj.try_emplace("key", std::move(nested_array));
+ auto result = GetStrings(&obj, "key");
+ ASSERT_EQ(result.size(), 1UL);
+ EXPECT_EQ(result[0], "string");
+}
diff --git a/lldb/unittests/DAP/LLDBUtilsTest.cpp b/lldb/unittests/DAP/LLDBUtilsTest.cpp
new file mode 100644
index 0000000..4f619af
--- /dev/null
+++ b/lldb/unittests/DAP/LLDBUtilsTest.cpp
@@ -0,0 +1,65 @@
+//===-- LLDBUtilsTest.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLDBUtils.h"
+#include "lldb/API/SBError.h"
+#include "lldb/API/SBStructuredData.h"
+#include "llvm/Support/Error.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+using namespace lldb;
+using namespace lldb_dap;
+
+TEST(LLDBUtilsTest, GetStringValue) {
+ // Create an SBStructuredData object from JSON.
+ const char *json_data = R"("test_string")";
+ SBStructuredData data;
+ SBError error = data.SetFromJSON(json_data);
+
+ // Ensure the JSON was parsed successfully.
+ ASSERT_TRUE(error.Success());
+ ASSERT_TRUE(data.IsValid());
+
+ // Call GetStringValue and verify the result.
+ std::string result = GetStringValue(data);
+ EXPECT_EQ(result, "test_string");
+
+ // Test with invalid SBStructuredData.
+ SBStructuredData invalid_data;
+ result = GetStringValue(invalid_data);
+ EXPECT_EQ(result, "");
+
+ // Test with empty JSON.
+ const char *empty_json = R"("")";
+ SBStructuredData empty_data;
+ error = empty_data.SetFromJSON(empty_json);
+
+ ASSERT_TRUE(error.Success());
+ ASSERT_TRUE(empty_data.IsValid());
+
+ result = GetStringValue(empty_data);
+ EXPECT_EQ(result, "");
+}
+
+TEST(LLDBUtilsTest, ToError) {
+ // Test with a successful SBError.
+ SBError success_error;
+ ASSERT_TRUE(success_error.Success());
+ llvm::Error llvm_error = ToError(success_error);
+ EXPECT_FALSE(llvm_error);
+
+ // Test with a failing SBError.
+ SBError fail_error;
+ fail_error.SetErrorString("Test error message");
+ ASSERT_TRUE(fail_error.Fail());
+ llvm_error = ToError(fail_error);
+
+ std::string error_message = toString(std::move(llvm_error));
+ EXPECT_EQ(error_message, "Test error message");
+}
diff --git a/lldb/unittests/DAP/ProtocolTypesTest.cpp b/lldb/unittests/DAP/ProtocolTypesTest.cpp
new file mode 100644
index 0000000..fa46816
--- /dev/null
+++ b/lldb/unittests/DAP/ProtocolTypesTest.cpp
@@ -0,0 +1,62 @@
+//===-- ProtocolTypesTest.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Protocol/ProtocolTypes.h"
+#include "llvm/Testing/Support/Error.h"
+#include "gtest/gtest.h"
+
+using namespace lldb;
+using namespace lldb_dap;
+using namespace lldb_dap::protocol;
+
+template <typename T> static llvm::Expected<T> roundtrip(const T &input) {
+ llvm::json::Value value = toJSON(input);
+ llvm::json::Path::Root root;
+ T output;
+ if (!fromJSON(value, output, root))
+ return root.getError();
+ return output;
+}
+
+TEST(ProtocolTypesTest, ExceptionBreakpointsFilter) {
+ ExceptionBreakpointsFilter filter;
+ filter.filter = "testFilter";
+ filter.label = "Test Filter";
+ filter.description = "This is a test filter";
+ filter.defaultState = true;
+ filter.supportsCondition = true;
+ filter.conditionDescription = "Condition for test filter";
+
+ llvm::Expected<ExceptionBreakpointsFilter> deserialized_filter =
+ roundtrip(filter);
+ ASSERT_THAT_EXPECTED(deserialized_filter, llvm::Succeeded());
+
+ EXPECT_EQ(filter.filter, deserialized_filter->filter);
+ EXPECT_EQ(filter.label, deserialized_filter->label);
+ EXPECT_EQ(filter.description, deserialized_filter->description);
+ EXPECT_EQ(filter.defaultState, deserialized_filter->defaultState);
+ EXPECT_EQ(filter.supportsCondition, deserialized_filter->supportsCondition);
+ EXPECT_EQ(filter.conditionDescription,
+ deserialized_filter->conditionDescription);
+}
+
+TEST(ProtocolTypesTest, Source) {
+ Source source;
+ source.name = "testName";
+ source.path = "/path/to/source";
+ source.sourceReference = 12345;
+ source.presentationHint = ePresentationHintEmphasize;
+
+ llvm::Expected<Source> deserialized_source = roundtrip(source);
+ ASSERT_THAT_EXPECTED(deserialized_source, llvm::Succeeded());
+
+ EXPECT_EQ(source.name, deserialized_source->name);
+ EXPECT_EQ(source.path, deserialized_source->path);
+ EXPECT_EQ(source.sourceReference, deserialized_source->sourceReference);
+ EXPECT_EQ(source.presentationHint, deserialized_source->presentationHint);
+}
diff --git a/lldb/unittests/Host/HostTest.cpp b/lldb/unittests/Host/HostTest.cpp
index 9306a86..52224bf 100644
--- a/lldb/unittests/Host/HostTest.cpp
+++ b/lldb/unittests/Host/HostTest.cpp
@@ -90,7 +90,6 @@ TEST(Host, LaunchProcessSetsArgv0) {
ASSERT_THAT(exit_status.get_future().get(), 0);
}
-#ifdef LLVM_ON_UNIX
TEST(Host, LaunchProcessDuplicatesHandle) {
static constexpr llvm::StringLiteral test_msg("Hello subprocess!");
@@ -130,4 +129,3 @@ TEST(Host, LaunchProcessDuplicatesHandle) {
ASSERT_THAT_EXPECTED(bytes_read, llvm::Succeeded());
ASSERT_EQ(llvm::StringRef(msg, *bytes_read), test_msg);
}
-#endif
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 7296bb8..5f14726 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -6363,6 +6363,8 @@ The following ``tag:`` values are valid:
DW_TAG_enumeration_type = 4
DW_TAG_structure_type = 19
DW_TAG_union_type = 23
+ DW_TAG_variant = 25
+ DW_TAG_variant_part = 51
For ``DW_TAG_array_type``, the ``elements:`` should be :ref:`subrange
descriptors <DISubrange>` or :ref:`subrange descriptors
@@ -6398,6 +6400,16 @@ For ``DW_TAG_structure_type``, ``DW_TAG_class_type``, and
``tag: DW_TAG_friend``; or :ref:`subprograms <DISubprogram>` with
``isDefinition: false``.
+``DW_TAG_variant_part`` introduces a variant part of a structure type.
+This should have a discriminant, a member that is used to decide which
+elements are active. The elements of the variant part should each be
+a ``DW_TAG_member``; if a member has a non-null ``ExtraData``, then it
+is a ``ConstantInt`` or ``ConstantDataArray`` indicating the values of
+the discriminant member that cause the activation of this branch. A
+member itself may be of composite type with tag ``DW_TAG_variant``; in
+this case the members of that composite type are inlined into the
+current one.
+
.. _DISubrange:
DISubrange
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index 4b98f58..0e0567c 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -494,10 +494,10 @@ The current vendor extensions supported are:
LLVM implements `version 0.2 of the Qualcomm uC Sync Delay extension specification <https://github.com/quic/riscv-unified-db/releases/latest>`__ by Qualcomm. All instructions are prefixed with `qc.` as described in the specification. These instructions are only available for riscv32.
``Xmipscmov``
- LLVM implements conditional move for the `p8700 processor <https://mips.com/products/hardware/p8700/>` by MIPS.
+ LLVM implements conditional move for the `p8700 processor <https://mips.com/products/hardware/p8700/>`__ by MIPS.
``Xmipslsp``
- LLVM implements load/store pair instructions for the `p8700 processor <https://mips.com/products/hardware/p8700/>` by MIPS.
+ LLVM implements load/store pair instructions for the `p8700 processor <https://mips.com/products/hardware/p8700/>`__ by MIPS.
``experimental-XRivosVisni``
LLVM implements `version 0.1 of the Rivos Vector Integer Small New Instructions extension specification <https://github.com/rivosinc/rivos-custom-extensions>`__.
@@ -508,6 +508,9 @@ The current vendor extensions supported are:
``XAndesPerf``
LLVM implements `version 5.0.0 of the Andes Performance Extension specification <https://github.com/andestech/andes-v5-isa/releases/download/ast-v5_4_0-release/AndeStar_V5_ISA_Spec_UM165-v1.5.08-20250317.pdf>`__ by Andes Technology. All instructions are prefixed with `nds.` as described in the specification.
+``XAndesVPackFPH``
+ LLVM implements `version 5.0.0 of the Andes Vector Packed FP16 Extension specification <https://github.com/andestech/andes-v5-isa/releases/download/ast-v5_4_0-release/AndeStar_V5_ISA_Spec_UM165-v1.5.08-20250317.pdf>`__ by Andes Technology. All instructions are prefixed with `nds.` as described in the specification.
+
Experimental C Intrinsics
=========================
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 0531836..f4bec50 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -185,6 +185,7 @@ Changes to the RISC-V Backend
interrupt handlers without using inline assembly.
* Adds assembler support for the Andes `XAndesperf` (Andes Performance extension).
* `-mcpu=sifive-p870` was added.
+* Adds assembler support for the Andes `XAndesvpackfph` (Andes Vector Packed FP16 extension).
Changes to the WebAssembly Backend
----------------------------------
diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h
index ba33c49..7fbf09b 100644
--- a/llvm/include/llvm/ADT/APInt.h
+++ b/llvm/include/llvm/ADT/APInt.h
@@ -1366,11 +1366,10 @@ public:
/// This function handles case when \p loBit <= \p hiBit.
void setBits(unsigned loBit, unsigned hiBit) {
assert(hiBit <= BitWidth && "hiBit out of range");
- assert(loBit <= BitWidth && "loBit out of range");
assert(loBit <= hiBit && "loBit greater than hiBit");
if (loBit == hiBit)
return;
- if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
+ if (hiBit <= APINT_BITS_PER_WORD) {
uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
mask <<= loBit;
if (isSingleWord())
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 4e2d37b..3f63913 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1797,7 +1797,7 @@ public:
/// As opposed to the normal scheme of p = phi (0, a) which allows the select
/// to be pulled out of the loop. If the select(.., add, ..) can be predicated
/// by the target, this can lead to cleaner code generation.
- bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty) const;
+ bool preferPredicatedReductionSelect() const;
/// Return true if the loop vectorizer should consider vectorizing an
/// otherwise scalar epilogue loop.
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index a440b64..a80b4c5 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -1087,10 +1087,7 @@ public:
}
virtual bool preferAlternateOpcodeVectorization() const { return true; }
- virtual bool preferPredicatedReductionSelect(unsigned Opcode,
- Type *Ty) const {
- return false;
- }
+ virtual bool preferPredicatedReductionSelect() const { return false; }
virtual bool preferEpilogueVectorization() const { return true; }
diff --git a/llvm/include/llvm/Analysis/VecFuncs.def b/llvm/include/llvm/Analysis/VecFuncs.def
index b9f7a38..68753a2 100644
--- a/llvm/include/llvm/Analysis/VecFuncs.def
+++ b/llvm/include/llvm/Analysis/VecFuncs.def
@@ -954,7 +954,7 @@ TLI_DEFINE_VECFUNC("erfc", "Sleef_erfcdx_u15rvvm2", SCALABLE(2), NOMASK, "_ZGVr
TLI_DEFINE_VECFUNC("erfcf", "Sleef_erfcfx_u15rvvm2", SCALABLE(4), NOMASK, "_ZGVrNxv")
TLI_DEFINE_VECFUNC("exp", "Sleef_expdx_u10rvvm2", SCALABLE(2), NOMASK, "_ZGVrNxv")
-TLI_DEFINE_VECFUNC("llvm.exp.f64", "Sleef_expdx_u10rvvm2", SCALABLE(4), NOMASK, "_ZGVrNxv")
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "Sleef_expdx_u10rvvm2", SCALABLE(2), NOMASK, "_ZGVrNxv")
TLI_DEFINE_VECFUNC("expf", "Sleef_expfx_u10rvvm2", SCALABLE(4), NOMASK, "_ZGVrNxv")
TLI_DEFINE_VECFUNC("llvm.exp.f32", "Sleef_expfx_u10rvvm2", SCALABLE(4), NOMASK, "_ZGVrNxv")
diff --git a/llvm/include/llvm/DebugInfo/BTF/BTF.h b/llvm/include/llvm/DebugInfo/BTF/BTF.h
index d88af2f..bd666e4 100644
--- a/llvm/include/llvm/DebugInfo/BTF/BTF.h
+++ b/llvm/include/llvm/DebugInfo/BTF/BTF.h
@@ -304,14 +304,12 @@ enum PatchableRelocKind : uint32_t {
// For CommonType sub-types that are followed by a single entry of
// some type in the binary format.
#define BTF_DEFINE_TAIL(Type, Accessor) \
- const Type &Accessor() const { return *getTrailingObjects<Type>(); }
+ const Type &Accessor() const { return *getTrailingObjects(); }
// For CommonType sub-types that are followed by CommonType::getVlen()
// number of entries of some type in the binary format.
#define BTF_DEFINE_TAIL_ARR(Type, Accessor) \
- ArrayRef<Type> Accessor() const { \
- return ArrayRef<Type>(getTrailingObjects<Type>(), getVlen()); \
- }
+ ArrayRef<Type> Accessor() const { return getTrailingObjects(getVlen()); }
struct ArrayType final : CommonType,
private TrailingObjects<ArrayType, BTFArray> {
diff --git a/llvm/include/llvm/DebugInfo/DIContext.h b/llvm/include/llvm/DebugInfo/DIContext.h
index c90b999..0347f90 100644
--- a/llvm/include/llvm/DebugInfo/DIContext.h
+++ b/llvm/include/llvm/DebugInfo/DIContext.h
@@ -238,7 +238,7 @@ struct DIDumpOptions {
class DIContext {
public:
- enum DIContextKind { CK_DWARF, CK_PDB, CK_BTF };
+ enum DIContextKind { CK_DWARF, CK_PDB, CK_BTF, CK_GSYM };
DIContext(DIContextKind K) : Kind(K) {}
virtual ~DIContext() = default;
diff --git a/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h b/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h
index 18764225..19ec35c 100644
--- a/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h
+++ b/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h
@@ -234,11 +234,8 @@ inline bool operator!=(const FunctionInfo &LHS, const FunctionInfo &RHS) {
/// the GSYM file.
inline bool operator<(const FunctionInfo &LHS, const FunctionInfo &RHS) {
// First sort by address range
- if (LHS.Range != RHS.Range)
- return LHS.Range < RHS.Range;
- if (LHS.Inline == RHS.Inline)
- return LHS.OptLineTable < RHS.OptLineTable;
- return LHS.Inline < RHS.Inline;
+ return std::tie(LHS.Range, LHS.Inline, LHS.OptLineTable) <
+ std::tie(RHS.Range, RHS.Inline, RHS.OptLineTable);
}
raw_ostream &operator<<(raw_ostream &OS, const FunctionInfo &R);
diff --git a/llvm/include/llvm/DebugInfo/GSYM/GsymDIContext.h b/llvm/include/llvm/DebugInfo/GSYM/GsymDIContext.h
new file mode 100644
index 0000000..396c08c
--- /dev/null
+++ b/llvm/include/llvm/DebugInfo/GSYM/GsymDIContext.h
@@ -0,0 +1,66 @@
+//===-- GsymDIContext.h --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_GSYM_GSYMDICONTEXT_H
+#define LLVM_DEBUGINFO_GSYM_GSYMDICONTEXT_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+namespace gsym {
+
+class GsymReader;
+
+/// GSYM DI Context
+/// This data structure is the top level entity that deals with GSYM
+/// symbolication.
+/// This data structure exists only when there is a need for a transparent
+/// interface to different symbolication formats (e.g. GSYM, PDB and DWARF).
+/// More control and power over the debug information access can be had by using
+/// the GSYM interfaces directly.
+class GsymDIContext : public DIContext {
+public:
+ GsymDIContext(std::unique_ptr<GsymReader> Reader);
+
+ GsymDIContext(GsymDIContext &) = delete;
+ GsymDIContext &operator=(GsymDIContext &) = delete;
+
+ static bool classof(const DIContext *DICtx) {
+ return DICtx->getKind() == CK_GSYM;
+ }
+
+ void dump(raw_ostream &OS, DIDumpOptions DIDumpOpts) override;
+
+ std::optional<DILineInfo> getLineInfoForAddress(
+ object::SectionedAddress Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ std::optional<DILineInfo>
+ getLineInfoForDataAddress(object::SectionedAddress Address) override;
+ DILineInfoTable getLineInfoForAddressRange(
+ object::SectionedAddress Address, uint64_t Size,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DIInliningInfo getInliningInfoForAddress(
+ object::SectionedAddress Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+
+ std::vector<DILocal>
+ getLocalsForAddress(object::SectionedAddress Address) override;
+
+private:
+ const std::unique_ptr<GsymReader> Reader;
+};
+
+} // end namespace gsym
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_GSYM_GSYMDICONTEXT_H
diff --git a/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h b/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
index 5747ad9..7c6beaa 100644
--- a/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
+++ b/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
@@ -58,11 +58,13 @@ public:
bool RelativeAddresses = false;
bool UntagAddresses = false;
bool UseDIA = false;
+ bool DisableGsym = false;
std::string DefaultArch;
std::vector<std::string> DsymHints;
std::string FallbackDebugPath;
std::string DWPName;
std::vector<std::string> DebugFileDirectory;
+ std::vector<std::string> GsymFileDirectory;
size_t MaxCacheSize =
sizeof(size_t) == 4
? 512 * 1024 * 1024 /* 512 MiB */
@@ -177,6 +179,7 @@ private:
ObjectFile *lookUpBuildIDObject(const std::string &Path,
const ELFObjectFileBase *Obj,
const std::string &ArchName);
+ std::string lookUpGsymFile(const std::string &Path);
bool findDebugBinary(const std::string &OrigPath,
const std::string &DebuglinkName, uint32_t CRCHash,
diff --git a/llvm/include/llvm/Demangle/ItaniumDemangle.h b/llvm/include/llvm/Demangle/ItaniumDemangle.h
index 67d6152..295c12a 100644
--- a/llvm/include/llvm/Demangle/ItaniumDemangle.h
+++ b/llvm/include/llvm/Demangle/ItaniumDemangle.h
@@ -3421,7 +3421,7 @@ const typename AbstractManglingParser<
{"or", OperatorInfo::Binary, false, Node::Prec::Ior, "operator|"},
{"pL", OperatorInfo::Binary, false, Node::Prec::Assign, "operator+="},
{"pl", OperatorInfo::Binary, false, Node::Prec::Additive, "operator+"},
- {"pm", OperatorInfo::Member, /*Named*/ false, Node::Prec::PtrMem,
+ {"pm", OperatorInfo::Member, /*Named*/ true, Node::Prec::PtrMem,
"operator->*"},
{"pp", OperatorInfo::Postfix, false, Node::Prec::Postfix, "operator++"},
{"ps", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator+"},
diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
index be0ec5d..37f3d9a 100644
--- a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
+++ b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
@@ -14,6 +14,7 @@
#ifndef LLVM_FRONTEND_HLSL_HLSLROOTSIGNATURE_H
#define LLVM_FRONTEND_HLSL_HLSLROOTSIGNATURE_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/DXILABI.h"
#include "llvm/Support/raw_ostream.h"
#include <variant>
@@ -122,6 +123,8 @@ struct DescriptorTableClause {
using RootElement = std::variant<RootFlags, RootConstants, DescriptorTable,
DescriptorTableClause>;
+void dumpRootElements(raw_ostream &OS, ArrayRef<RootElement> Elements);
+
} // namespace rootsig
} // namespace hlsl
} // namespace llvm
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index 76efa9b..7137c69 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -672,7 +672,7 @@ public:
StringRef getAsCString() const {
assert(isCString() && "Isn't a C string");
StringRef Str = getAsString();
- return Str.substr(0, Str.size() - 1);
+ return Str.drop_back();
}
/// Return the raw, underlying, bytes of this data. Note that this is an
diff --git a/llvm/include/llvm/IR/DIBuilder.h b/llvm/include/llvm/IR/DIBuilder.h
index d293c28..4ce71bd 100644
--- a/llvm/include/llvm/IR/DIBuilder.h
+++ b/llvm/include/llvm/IR/DIBuilder.h
@@ -406,6 +406,19 @@ namespace llvm {
Constant *Discriminant,
DINode::DIFlags Flags, DIType *Ty);
+ /// Create debugging information entry for a variant. A variant
+ /// created this way "inlines" multiple members into the enclosing
+ /// variant part.
+ /// \param Scope Scope in which this variant is defined.
+ /// \param Elements Variant elements.
+ /// \param Discriminant The discriminant for this branch; null for
+ /// the default branch. This may be a
+ /// ConstantDataArray if the variant applies
+ /// for multiple discriminants.
+ /// \param Ty Parent type.
+ DIDerivedType *createVariantMemberType(DIScope *Scope, DINodeArray Elements,
+ Constant *Discriminant, DIType *Ty);
+
/// Create debugging information entry for a bit field member.
/// \param Scope Member scope.
/// \param Name Member name.
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
index 2ad080e..d83fe12 100644
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -564,7 +564,9 @@ inline LLVMTargetDataRef wrap(const DataLayout *P) {
/// Used to lazily calculate structure layout information for a target machine,
/// based on the DataLayout structure.
-class StructLayout final : public TrailingObjects<StructLayout, TypeSize> {
+class StructLayout final : private TrailingObjects<StructLayout, TypeSize> {
+ friend TrailingObjects;
+
TypeSize StructSize;
Align StructAlignment;
unsigned IsPadded : 1;
@@ -586,11 +588,11 @@ public:
unsigned getElementContainingOffset(uint64_t FixedOffset) const;
MutableArrayRef<TypeSize> getMemberOffsets() {
- return llvm::MutableArrayRef(getTrailingObjects<TypeSize>(), NumElements);
+ return getTrailingObjects(NumElements);
}
ArrayRef<TypeSize> getMemberOffsets() const {
- return llvm::ArrayRef(getTrailingObjects<TypeSize>(), NumElements);
+ return getTrailingObjects(NumElements);
}
TypeSize getElementOffset(unsigned Idx) const {
@@ -606,10 +608,6 @@ private:
friend class DataLayout; // Only DataLayout can create this class
StructLayout(StructType *ST, const DataLayout &DL);
-
- size_t numTrailingObjects(OverloadToken<TypeSize>) const {
- return NumElements;
- }
};
// The implementation of this method is provided inline as it is particularly
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index c3c3ed3..3501c87 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -83,7 +83,7 @@ public:
///
FunctionType *getFunctionType() const;
- const std::string &getAsmString() const { return AsmString; }
+ StringRef getAsmString() const { return AsmString; }
StringRef getConstraintString() const { return Constraints; }
void collectAsmStrs(SmallVectorImpl<StringRef> &AsmStrs) const;
diff --git a/llvm/include/llvm/MC/MCContext.h b/llvm/include/llvm/MC/MCContext.h
index e97c890..73c6d57 100644
--- a/llvm/include/llvm/MC/MCContext.h
+++ b/llvm/include/llvm/MC/MCContext.h
@@ -259,13 +259,9 @@ private:
SelectionKey(SelectionKey), UniqueID(UniqueID) {}
bool operator<(const COFFSectionKey &Other) const {
- if (SectionName != Other.SectionName)
- return SectionName < Other.SectionName;
- if (GroupName != Other.GroupName)
- return GroupName < Other.GroupName;
- if (SelectionKey != Other.SelectionKey)
- return SelectionKey < Other.SelectionKey;
- return UniqueID < Other.UniqueID;
+ return std::tie(SectionName, GroupName, SelectionKey, UniqueID) <
+ std::tie(Other.SectionName, Other.GroupName, Other.SelectionKey,
+ Other.UniqueID);
}
};
@@ -279,11 +275,8 @@ private:
: SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) {}
bool operator<(const WasmSectionKey &Other) const {
- if (SectionName != Other.SectionName)
- return SectionName < Other.SectionName;
- if (GroupName != Other.GroupName)
- return GroupName < Other.GroupName;
- return UniqueID < Other.UniqueID;
+ return std::tie(SectionName, GroupName, UniqueID) <
+ std::tie(Other.SectionName, Other.GroupName, Other.UniqueID);
}
};
diff --git a/llvm/include/llvm/Support/AArch64AttributeParser.h b/llvm/include/llvm/Support/AArch64AttributeParser.h
index 823ae18..aa82ca1 100644
--- a/llvm/include/llvm/Support/AArch64AttributeParser.h
+++ b/llvm/include/llvm/Support/AArch64AttributeParser.h
@@ -9,13 +9,14 @@
#ifndef LLVM_SUPPORT_AARCH64ATTRIBUTEPARSER_H
#define LLVM_SUPPORT_AARCH64ATTRIBUTEPARSER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttrParserExtended.h"
#include "llvm/Support/ELFAttributes.h"
namespace llvm {
class AArch64AttributeParser : public ELFExtendedAttrParser {
- static std::vector<SubsectionAndTagToTagName> &returnTagsNamesMap();
+ LLVM_ABI static std::vector<SubsectionAndTagToTagName> &returnTagsNamesMap();
public:
AArch64AttributeParser(ScopedPrinter *Sw)
diff --git a/llvm/include/llvm/Support/AMDGPUMetadata.h b/llvm/include/llvm/Support/AMDGPUMetadata.h
index 76ac7ab7..990c825 100644
--- a/llvm/include/llvm/Support/AMDGPUMetadata.h
+++ b/llvm/include/llvm/Support/AMDGPUMetadata.h
@@ -16,6 +16,7 @@
#define LLVM_SUPPORT_AMDGPUMETADATA_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <string>
#include <system_error>
@@ -447,10 +448,10 @@ struct Metadata final {
};
/// Converts \p String to \p HSAMetadata.
-std::error_code fromString(StringRef String, Metadata &HSAMetadata);
+LLVM_ABI std::error_code fromString(StringRef String, Metadata &HSAMetadata);
/// Converts \p HSAMetadata to \p String.
-std::error_code toString(Metadata HSAMetadata, std::string &String);
+LLVM_ABI std::error_code toString(Metadata HSAMetadata, std::string &String);
//===----------------------------------------------------------------------===//
// HSA metadata for v3 code object.
diff --git a/llvm/include/llvm/Support/ARMAttributeParser.h b/llvm/include/llvm/Support/ARMAttributeParser.h
index 749f9cd..0fd8a1e 100644
--- a/llvm/include/llvm/Support/ARMAttributeParser.h
+++ b/llvm/include/llvm/Support/ARMAttributeParser.h
@@ -11,6 +11,7 @@
#include "ARMBuildAttributes.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttrParserCompact.h"
#include "llvm/Support/Error.h"
@@ -18,7 +19,7 @@ namespace llvm {
class ScopedPrinter;
-class ARMAttributeParser : public ELFCompactAttrParser {
+class LLVM_ABI ARMAttributeParser : public ELFCompactAttrParser {
struct DisplayHandler {
ARMBuildAttrs::AttrType attribute;
Error (ARMAttributeParser::*routine)(ARMBuildAttrs::AttrType);
diff --git a/llvm/include/llvm/Support/ARMBuildAttributes.h b/llvm/include/llvm/Support/ARMBuildAttributes.h
index 35f8992..68acf30 100644
--- a/llvm/include/llvm/Support/ARMBuildAttributes.h
+++ b/llvm/include/llvm/Support/ARMBuildAttributes.h
@@ -18,12 +18,13 @@
#ifndef LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
#define LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttributes.h"
namespace llvm {
namespace ARMBuildAttrs {
-const TagNameMap &getARMAttributeTags();
+LLVM_ABI const TagNameMap &getARMAttributeTags();
enum SpecialAttr {
// This is for the .cpu asm attr. It translates into one or more
diff --git a/llvm/include/llvm/Support/ARMWinEH.h b/llvm/include/llvm/Support/ARMWinEH.h
index b6710cc..083bbf4 100644
--- a/llvm/include/llvm/Support/ARMWinEH.h
+++ b/llvm/include/llvm/Support/ARMWinEH.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_ARMWINEH_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
namespace llvm {
@@ -205,8 +206,8 @@ inline uint16_t StackAdjustment(const RuntimeFunction &RF) {
/// SavedRegisterMask - Utility function to calculate the set of saved general
/// purpose (r0-r15) and VFP (d0-d31) registers.
-std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF,
- bool Prologue = true);
+LLVM_ABI std::pair<uint16_t, uint32_t>
+SavedRegisterMask(const RuntimeFunction &RF, bool Prologue = true);
/// RuntimeFunctionARM64 - An entry in the table of procedure data (.pdata)
///
diff --git a/llvm/include/llvm/Support/Allocator.h b/llvm/include/llvm/Support/Allocator.h
index 568f0d3..5993419 100644
--- a/llvm/include/llvm/Support/Allocator.h
+++ b/llvm/include/llvm/Support/Allocator.h
@@ -36,8 +36,9 @@ namespace detail {
// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
-void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
- size_t TotalMemory);
+LLVM_ABI void printBumpPtrAllocatorStats(unsigned NumSlabs,
+ size_t BytesAllocated,
+ size_t TotalMemory);
} // end namespace detail
diff --git a/llvm/include/llvm/Support/BalancedPartitioning.h b/llvm/include/llvm/Support/BalancedPartitioning.h
index 539d157..05307d7 100644
--- a/llvm/include/llvm/Support/BalancedPartitioning.h
+++ b/llvm/include/llvm/Support/BalancedPartitioning.h
@@ -41,6 +41,7 @@
#include "raw_ostream.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include <atomic>
#include <condition_variable>
@@ -67,7 +68,7 @@ public:
/// The ID of this node
IDT Id;
- void dump(raw_ostream &OS) const;
+ LLVM_ABI void dump(raw_ostream &OS) const;
protected:
/// The list of utility nodes associated with this node
@@ -99,10 +100,10 @@ struct BalancedPartitioningConfig {
class BalancedPartitioning {
public:
- BalancedPartitioning(const BalancedPartitioningConfig &Config);
+ LLVM_ABI BalancedPartitioning(const BalancedPartitioningConfig &Config);
/// Run recursive graph partitioning that optimizes a given objective.
- void run(std::vector<BPFunctionNode> &Nodes) const;
+ LLVM_ABI void run(std::vector<BPFunctionNode> &Nodes) const;
private:
struct UtilitySignature;
@@ -127,7 +128,7 @@ private:
/// Blocking wait for all threads to complete. Unlike ThreadPool, it is
/// acceptable for other threads to add more tasks while blocking on this
/// call.
- void wait();
+ LLVM_ABI void wait();
BPThreadPool(ThreadPoolInterface &TheThreadPool)
: TheThreadPool(TheThreadPool) {}
};
@@ -192,8 +193,8 @@ private:
protected:
/// Compute the move gain for uniform log-gap cost
- static float moveGain(const BPFunctionNode &N, bool FromLeftToRight,
- const SignaturesT &Signatures);
+ LLVM_ABI static float moveGain(const BPFunctionNode &N, bool FromLeftToRight,
+ const SignaturesT &Signatures);
friend class BalancedPartitioningTest_MoveGain_Test;
};
diff --git a/llvm/include/llvm/Support/Base64.h b/llvm/include/llvm/Support/Base64.h
index 3d96884..0dbd87b 100644
--- a/llvm/include/llvm/Support/Base64.h
+++ b/llvm/include/llvm/Support/Base64.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_BASE64_H
#define LLVM_SUPPORT_BASE64_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <string>
@@ -54,7 +55,8 @@ template <class InputBytes> std::string encodeBase64(InputBytes const &Bytes) {
return Buffer;
}
-llvm::Error decodeBase64(llvm::StringRef Input, std::vector<char> &Output);
+LLVM_ABI llvm::Error decodeBase64(llvm::StringRef Input,
+ std::vector<char> &Output);
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/BinaryStreamError.h b/llvm/include/llvm/Support/BinaryStreamError.h
index cf6e034..8a45f7b 100644
--- a/llvm/include/llvm/Support/BinaryStreamError.h
+++ b/llvm/include/llvm/Support/BinaryStreamError.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_BINARYSTREAMERROR_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include <string>
@@ -24,7 +25,7 @@ enum class stream_error_code {
};
/// Base class for errors originating when parsing raw PDB files
-class BinaryStreamError : public ErrorInfo<BinaryStreamError> {
+class LLVM_ABI BinaryStreamError : public ErrorInfo<BinaryStreamError> {
public:
static char ID;
explicit BinaryStreamError(stream_error_code C);
diff --git a/llvm/include/llvm/Support/BinaryStreamReader.h b/llvm/include/llvm/Support/BinaryStreamReader.h
index ca99388..5f0ca06 100644
--- a/llvm/include/llvm/Support/BinaryStreamReader.h
+++ b/llvm/include/llvm/Support/BinaryStreamReader.h
@@ -14,6 +14,7 @@
#include "llvm/Support/Alignment.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
@@ -29,10 +30,11 @@ namespace llvm {
class BinaryStreamReader {
public:
BinaryStreamReader() = default;
- explicit BinaryStreamReader(BinaryStreamRef Ref);
- explicit BinaryStreamReader(BinaryStream &Stream);
- explicit BinaryStreamReader(ArrayRef<uint8_t> Data, llvm::endianness Endian);
- explicit BinaryStreamReader(StringRef Data, llvm::endianness Endian);
+ LLVM_ABI explicit BinaryStreamReader(BinaryStreamRef Ref);
+ LLVM_ABI explicit BinaryStreamReader(BinaryStream &Stream);
+ LLVM_ABI explicit BinaryStreamReader(ArrayRef<uint8_t> Data,
+ llvm::endianness Endian);
+ LLVM_ABI explicit BinaryStreamReader(StringRef Data, llvm::endianness Endian);
BinaryStreamReader(const BinaryStreamReader &Other) = default;
@@ -46,7 +48,7 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer);
+ LLVM_ABI Error readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer);
/// Read \p Size bytes from the underlying stream at the current offset and
/// and set \p Buffer to the resulting data slice. Whether a copy occurs
@@ -55,7 +57,7 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size);
+ LLVM_ABI Error readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size);
/// Read an integer of the specified endianness into \p Dest and update the
/// stream's offset. The data is always copied from the stream's underlying
@@ -91,13 +93,13 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readULEB128(uint64_t &Dest);
+ LLVM_ABI Error readULEB128(uint64_t &Dest);
/// Read a signed LEB128 encoded value.
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readSLEB128(int64_t &Dest);
+ LLVM_ABI Error readSLEB128(int64_t &Dest);
/// Read a null terminated string from \p Dest. Whether a copy occurs depends
/// on the implementation of the underlying stream. Updates the stream's
@@ -105,14 +107,14 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readCString(StringRef &Dest);
+ LLVM_ABI Error readCString(StringRef &Dest);
/// Similar to readCString, however read a null-terminated UTF16 string
/// instead.
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readWideString(ArrayRef<UTF16> &Dest);
+ LLVM_ABI Error readWideString(ArrayRef<UTF16> &Dest);
/// Read a \p Length byte string into \p Dest. Whether a copy occurs depends
/// on the implementation of the underlying stream. Updates the stream's
@@ -120,7 +122,7 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readFixedString(StringRef &Dest, uint32_t Length);
+ LLVM_ABI Error readFixedString(StringRef &Dest, uint32_t Length);
/// Read the entire remainder of the underlying stream into \p Ref. This is
/// equivalent to calling getUnderlyingStream().slice(Offset). Updates the
@@ -128,7 +130,7 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readStreamRef(BinaryStreamRef &Ref);
+ LLVM_ABI Error readStreamRef(BinaryStreamRef &Ref);
/// Read \p Length bytes from the underlying stream into \p Ref. This is
/// equivalent to calling getUnderlyingStream().slice(Offset, Length).
@@ -137,7 +139,7 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readStreamRef(BinaryStreamRef &Ref, uint32_t Length);
+ LLVM_ABI Error readStreamRef(BinaryStreamRef &Ref, uint32_t Length);
/// Read \p Length bytes from the underlying stream into \p Ref. This is
/// equivalent to calling getUnderlyingStream().slice(Offset, Length).
@@ -146,7 +148,7 @@ public:
///
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
- Error readSubstream(BinarySubstreamRef &Ref, uint32_t Length);
+ LLVM_ABI Error readSubstream(BinarySubstreamRef &Ref, uint32_t Length);
/// Get a pointer to an object of type T from the underlying stream, as if by
/// memcpy, and store the result into \p Dest. It is up to the caller to
@@ -251,17 +253,17 @@ public:
///
/// \returns a success error code if at least \p Amount bytes remain in the
/// stream, otherwise returns an appropriate error code.
- Error skip(uint64_t Amount);
+ LLVM_ABI Error skip(uint64_t Amount);
/// Examine the next byte of the underlying stream without advancing the
/// stream's offset. If the stream is empty the behavior is undefined.
///
/// \returns the next byte in the stream.
- uint8_t peek() const;
+ LLVM_ABI uint8_t peek() const;
- Error padToAlignment(uint32_t Align);
+ LLVM_ABI Error padToAlignment(uint32_t Align);
- std::pair<BinaryStreamReader, BinaryStreamReader>
+ LLVM_ABI std::pair<BinaryStreamReader, BinaryStreamReader>
split(uint64_t Offset) const;
private:
diff --git a/llvm/include/llvm/Support/BinaryStreamRef.h b/llvm/include/llvm/Support/BinaryStreamRef.h
index fdc46f5..47009ff 100644
--- a/llvm/include/llvm/Support/BinaryStreamRef.h
+++ b/llvm/include/llvm/Support/BinaryStreamRef.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamError.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
@@ -160,11 +161,12 @@ class BinaryStreamRef
public:
BinaryStreamRef() = default;
- BinaryStreamRef(BinaryStream &Stream);
- BinaryStreamRef(BinaryStream &Stream, uint64_t Offset,
- std::optional<uint64_t> Length);
- explicit BinaryStreamRef(ArrayRef<uint8_t> Data, llvm::endianness Endian);
- explicit BinaryStreamRef(StringRef Data, llvm::endianness Endian);
+ LLVM_ABI BinaryStreamRef(BinaryStream &Stream);
+ LLVM_ABI BinaryStreamRef(BinaryStream &Stream, uint64_t Offset,
+ std::optional<uint64_t> Length);
+ LLVM_ABI explicit BinaryStreamRef(ArrayRef<uint8_t> Data,
+ llvm::endianness Endian);
+ LLVM_ABI explicit BinaryStreamRef(StringRef Data, llvm::endianness Endian);
BinaryStreamRef(const BinaryStreamRef &Other) = default;
BinaryStreamRef &operator=(const BinaryStreamRef &Other) = default;
@@ -181,16 +183,16 @@ public:
/// \returns a success error code if the entire range of data is within the
/// bounds of this BinaryStreamRef's view and the implementation could read
/// the data, and an appropriate error code otherwise.
- Error readBytes(uint64_t Offset, uint64_t Size,
- ArrayRef<uint8_t> &Buffer) const;
+ LLVM_ABI Error readBytes(uint64_t Offset, uint64_t Size,
+ ArrayRef<uint8_t> &Buffer) const;
/// Given an Offset into this BinaryStreamRef, return a reference to the
/// largest buffer the stream could support without necessitating a copy.
///
/// \returns a success error code if implementation could read the data,
/// and an appropriate error code otherwise.
- Error readLongestContiguousChunk(uint64_t Offset,
- ArrayRef<uint8_t> &Buffer) const;
+ LLVM_ABI Error readLongestContiguousChunk(uint64_t Offset,
+ ArrayRef<uint8_t> &Buffer) const;
};
struct BinarySubstreamRef {
@@ -233,11 +235,12 @@ class WritableBinaryStreamRef
public:
WritableBinaryStreamRef() = default;
- WritableBinaryStreamRef(WritableBinaryStream &Stream);
- WritableBinaryStreamRef(WritableBinaryStream &Stream, uint64_t Offset,
- std::optional<uint64_t> Length);
- explicit WritableBinaryStreamRef(MutableArrayRef<uint8_t> Data,
- llvm::endianness Endian);
+ LLVM_ABI WritableBinaryStreamRef(WritableBinaryStream &Stream);
+ LLVM_ABI WritableBinaryStreamRef(WritableBinaryStream &Stream,
+ uint64_t Offset,
+ std::optional<uint64_t> Length);
+ LLVM_ABI explicit WritableBinaryStreamRef(MutableArrayRef<uint8_t> Data,
+ llvm::endianness Endian);
WritableBinaryStreamRef(const WritableBinaryStreamRef &Other) = default;
WritableBinaryStreamRef &
operator=(const WritableBinaryStreamRef &Other) = default;
@@ -255,13 +258,13 @@ public:
/// \returns a success error code if the data could fit within the underlying
/// stream at the specified location and the implementation could write the
/// data, and an appropriate error code otherwise.
- Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Data) const;
+ LLVM_ABI Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Data) const;
/// Conver this WritableBinaryStreamRef to a read-only BinaryStreamRef.
- operator BinaryStreamRef() const;
+ LLVM_ABI operator BinaryStreamRef() const;
/// For buffered streams, commits changes to the backing store.
- Error commit();
+ LLVM_ABI Error commit();
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/BinaryStreamWriter.h b/llvm/include/llvm/Support/BinaryStreamWriter.h
index bc1d794..dddf53b 100644
--- a/llvm/include/llvm/Support/BinaryStreamWriter.h
+++ b/llvm/include/llvm/Support/BinaryStreamWriter.h
@@ -14,6 +14,7 @@
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
@@ -30,10 +31,10 @@ namespace llvm {
class BinaryStreamWriter {
public:
BinaryStreamWriter() = default;
- explicit BinaryStreamWriter(WritableBinaryStreamRef Ref);
- explicit BinaryStreamWriter(WritableBinaryStream &Stream);
- explicit BinaryStreamWriter(MutableArrayRef<uint8_t> Data,
- llvm::endianness Endian);
+ LLVM_ABI explicit BinaryStreamWriter(WritableBinaryStreamRef Ref);
+ LLVM_ABI explicit BinaryStreamWriter(WritableBinaryStream &Stream);
+ LLVM_ABI explicit BinaryStreamWriter(MutableArrayRef<uint8_t> Data,
+ llvm::endianness Endian);
BinaryStreamWriter(const BinaryStreamWriter &Other) = default;
@@ -47,7 +48,7 @@ public:
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeBytes(ArrayRef<uint8_t> Buffer);
+ LLVM_ABI Error writeBytes(ArrayRef<uint8_t> Buffer);
/// Write the integer \p Value to the underlying stream in the
/// specified endianness. On success, updates the offset so that
@@ -77,14 +78,14 @@ public:
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeULEB128(uint64_t Value);
+ LLVM_ABI Error writeULEB128(uint64_t Value);
/// Write the unsigned integer Value to the underlying stream using ULEB128
/// encoding.
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeSLEB128(int64_t Value);
+ LLVM_ABI Error writeSLEB128(int64_t Value);
/// Write the string \p Str to the underlying stream followed by a null
/// terminator. On success, updates the offset so that subsequent writes
@@ -93,7 +94,7 @@ public:
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeCString(StringRef Str);
+ LLVM_ABI Error writeCString(StringRef Str);
/// Write the string \p Str to the underlying stream without a null
/// terminator. On success, updates the offset so that subsequent writes
@@ -101,7 +102,7 @@ public:
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeFixedString(StringRef Str);
+ LLVM_ABI Error writeFixedString(StringRef Str);
/// Efficiently reads all data from \p Ref, and writes it to this stream.
/// This operation will not invoke any copies of the source data, regardless
@@ -109,7 +110,7 @@ public:
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeStreamRef(BinaryStreamRef Ref);
+ LLVM_ABI Error writeStreamRef(BinaryStreamRef Ref);
/// Efficiently reads \p Size bytes from \p Ref, and writes it to this stream.
/// This operation will not invoke any copies of the source data, regardless
@@ -117,7 +118,7 @@ public:
///
/// \returns a success error code if the data was successfully written,
/// otherwise returns an appropriate error code.
- Error writeStreamRef(BinaryStreamRef Ref, uint64_t Size);
+ LLVM_ABI Error writeStreamRef(BinaryStreamRef Ref, uint64_t Size);
/// Writes the object \p Obj to the underlying stream, as if by using memcpy.
/// It is up to the caller to ensure that type of \p Obj can be safely copied
@@ -171,13 +172,14 @@ public:
}
/// Splits the Writer into two Writers at a given offset.
- std::pair<BinaryStreamWriter, BinaryStreamWriter> split(uint64_t Off) const;
+ LLVM_ABI std::pair<BinaryStreamWriter, BinaryStreamWriter>
+ split(uint64_t Off) const;
void setOffset(uint64_t Off) { Offset = Off; }
uint64_t getOffset() const { return Offset; }
uint64_t getLength() const { return Stream.getLength(); }
uint64_t bytesRemaining() const { return getLength() - getOffset(); }
- Error padToAlignment(uint32_t Align);
+ LLVM_ABI Error padToAlignment(uint32_t Align);
protected:
WritableBinaryStreamRef Stream;
diff --git a/llvm/include/llvm/Support/BlockFrequency.h b/llvm/include/llvm/Support/BlockFrequency.h
index aeab996..eb979af 100644
--- a/llvm/include/llvm/Support/BlockFrequency.h
+++ b/llvm/include/llvm/Support/BlockFrequency.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_BLOCKFREQUENCY_H
#define LLVM_SUPPORT_BLOCKFREQUENCY_H
+#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstdint>
#include <optional>
@@ -39,13 +40,13 @@ public:
/// Multiplies with a branch probability. The computation will never
/// overflow.
- BlockFrequency &operator*=(BranchProbability Prob);
- BlockFrequency operator*(BranchProbability Prob) const;
+ LLVM_ABI BlockFrequency &operator*=(BranchProbability Prob);
+ LLVM_ABI BlockFrequency operator*(BranchProbability Prob) const;
/// Divide by a non-zero branch probability using saturating
/// arithmetic.
- BlockFrequency &operator/=(BranchProbability Prob);
- BlockFrequency operator/(BranchProbability Prob) const;
+ LLVM_ABI BlockFrequency &operator/=(BranchProbability Prob);
+ LLVM_ABI BlockFrequency operator/(BranchProbability Prob) const;
/// Adds another block frequency using saturating arithmetic.
BlockFrequency &operator+=(BlockFrequency Freq) {
@@ -80,7 +81,7 @@ public:
}
/// Multiplies frequency with `Factor`. Returns `nullopt` in case of overflow.
- std::optional<BlockFrequency> mul(uint64_t Factor) const;
+ LLVM_ABI std::optional<BlockFrequency> mul(uint64_t Factor) const;
/// Shift block frequency to the right by count digits saturating to 1.
BlockFrequency &operator>>=(const unsigned count) {
@@ -120,8 +121,8 @@ public:
}
};
-void printRelativeBlockFreq(raw_ostream &OS, BlockFrequency EntryFreq,
- BlockFrequency Freq);
+LLVM_ABI void printRelativeBlockFreq(raw_ostream &OS, BlockFrequency EntryFreq,
+ BlockFrequency Freq);
} // namespace llvm
diff --git a/llvm/include/llvm/Support/BranchProbability.h b/llvm/include/llvm/Support/BranchProbability.h
index 74731c4..570531e 100644
--- a/llvm/include/llvm/Support/BranchProbability.h
+++ b/llvm/include/llvm/Support/BranchProbability.h
@@ -14,6 +14,7 @@
#define LLVM_SUPPORT_BRANCHPROBABILITY_H
#include "llvm/ADT/ADL.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <algorithm>
#include <cassert>
@@ -42,7 +43,7 @@ class BranchProbability {
public:
BranchProbability() : N(UnknownN) {}
- BranchProbability(uint32_t Numerator, uint32_t Denominator);
+ LLVM_ABI BranchProbability(uint32_t Numerator, uint32_t Denominator);
bool isZero() const { return N == 0; }
bool isUnknown() const { return N == UnknownN; }
@@ -54,8 +55,8 @@ public:
// as denominator.
static BranchProbability getRaw(uint32_t N) { return BranchProbability(N); }
// Create a BranchProbability object from 64-bit integers.
- static BranchProbability getBranchProbability(uint64_t Numerator,
- uint64_t Denominator);
+ LLVM_ABI static BranchProbability getBranchProbability(uint64_t Numerator,
+ uint64_t Denominator);
// Normalize given probabilties so that the sum of them becomes approximate
// one.
@@ -74,9 +75,9 @@ public:
// Return (1 - Probability).
BranchProbability getCompl() const { return BranchProbability(D - N); }
- raw_ostream &print(raw_ostream &OS) const;
+ LLVM_ABI raw_ostream &print(raw_ostream &OS) const;
- void dump() const;
+ LLVM_ABI void dump() const;
/// Scale a large integer.
///
@@ -84,7 +85,7 @@ public:
/// result.
///
/// \return \c Num times \c this.
- uint64_t scale(uint64_t Num) const;
+ LLVM_ABI uint64_t scale(uint64_t Num) const;
/// Scale a large integer by the inverse.
///
@@ -92,7 +93,7 @@ public:
/// Returns the floor of the result.
///
/// \return \c Num divided by \c this.
- uint64_t scaleByInverse(uint64_t Num) const;
+ LLVM_ABI uint64_t scaleByInverse(uint64_t Num) const;
BranchProbability &operator+=(BranchProbability RHS) {
assert(N != UnknownN && RHS.N != UnknownN &&
diff --git a/llvm/include/llvm/Support/BuryPointer.h b/llvm/include/llvm/Support/BuryPointer.h
index 276a5b7..02144ba 100644
--- a/llvm/include/llvm/Support/BuryPointer.h
+++ b/llvm/include/llvm/Support/BuryPointer.h
@@ -9,6 +9,7 @@
#ifndef LLVM_SUPPORT_BURYPOINTER_H
#define LLVM_SUPPORT_BURYPOINTER_H
+#include "llvm/Support/Compiler.h"
#include <memory>
namespace llvm {
@@ -19,7 +20,7 @@ namespace llvm {
// the memory is not misdiagnosed as an unintentional leak by leak detection
// tools (this is achieved by preserving pointers to the object in a globally
// visible array).
-void BuryPointer(const void *Ptr);
+LLVM_ABI void BuryPointer(const void *Ptr);
template <typename T> void BuryPointer(std::unique_ptr<T> Ptr) {
BuryPointer(Ptr.release());
}
diff --git a/llvm/include/llvm/Support/COM.h b/llvm/include/llvm/Support/COM.h
index d59966f..e2701ca 100644
--- a/llvm/include/llvm/Support/COM.h
+++ b/llvm/include/llvm/Support/COM.h
@@ -14,6 +14,8 @@
#ifndef LLVM_SUPPORT_COM_H
#define LLVM_SUPPORT_COM_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
namespace sys {
@@ -21,9 +23,9 @@ enum class COMThreadingMode { SingleThreaded, MultiThreaded };
class InitializeCOMRAII {
public:
- explicit InitializeCOMRAII(COMThreadingMode Threading,
- bool SpeedOverMemory = false);
- ~InitializeCOMRAII();
+ LLVM_ABI explicit InitializeCOMRAII(COMThreadingMode Threading,
+ bool SpeedOverMemory = false);
+ LLVM_ABI ~InitializeCOMRAII();
private:
InitializeCOMRAII(const InitializeCOMRAII &) = delete;
diff --git a/llvm/include/llvm/Support/CRC.h b/llvm/include/llvm/Support/CRC.h
index 210890a..a07b017 100644
--- a/llvm/include/llvm/Support/CRC.h
+++ b/llvm/include/llvm/Support/CRC.h
@@ -13,17 +13,18 @@
#ifndef LLVM_SUPPORT_CRC_H
#define LLVM_SUPPORT_CRC_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
template <typename T> class ArrayRef;
// Compute the CRC-32 of Data.
-uint32_t crc32(ArrayRef<uint8_t> Data);
+LLVM_ABI uint32_t crc32(ArrayRef<uint8_t> Data);
// Compute the running CRC-32 of Data, with CRC being the previous value of the
// checksum.
-uint32_t crc32(uint32_t CRC, ArrayRef<uint8_t> Data);
+LLVM_ABI uint32_t crc32(uint32_t CRC, ArrayRef<uint8_t> Data);
// Class for computing the JamCRC.
//
@@ -47,7 +48,7 @@ public:
JamCRC(uint32_t Init = 0xFFFFFFFFU) : CRC(Init) {}
// Update the CRC calculation with Data.
- void update(ArrayRef<uint8_t> Data);
+ LLVM_ABI void update(ArrayRef<uint8_t> Data);
uint32_t getCRC() const { return CRC; }
diff --git a/llvm/include/llvm/Support/CSKYAttributeParser.h b/llvm/include/llvm/Support/CSKYAttributeParser.h
index 08257a7..0d28c92 100644
--- a/llvm/include/llvm/Support/CSKYAttributeParser.h
+++ b/llvm/include/llvm/Support/CSKYAttributeParser.h
@@ -10,10 +10,11 @@
#define LLVM_SUPPORT_CSKYATTRIBUTEPARSER_H
#include "llvm/Support/CSKYAttributes.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttrParserCompact.h"
namespace llvm {
-class CSKYAttributeParser : public ELFCompactAttrParser {
+class LLVM_ABI CSKYAttributeParser : public ELFCompactAttrParser {
struct DisplayHandler {
CSKYAttrs::AttrType attribute;
Error (CSKYAttributeParser::*routine)(unsigned);
diff --git a/llvm/include/llvm/Support/CSKYAttributes.h b/llvm/include/llvm/Support/CSKYAttributes.h
index 723f2ce..c16d3de 100644
--- a/llvm/include/llvm/Support/CSKYAttributes.h
+++ b/llvm/include/llvm/Support/CSKYAttributes.h
@@ -12,12 +12,13 @@
#ifndef LLVM_SUPPORT_CSKYATTRIBUTES_H
#define LLVM_SUPPORT_CSKYATTRIBUTES_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttributes.h"
namespace llvm {
namespace CSKYAttrs {
-const TagNameMap &getCSKYAttributeTags();
+LLVM_ABI const TagNameMap &getCSKYAttributeTags();
enum AttrType {
CSKY_ARCH_NAME = 4,
diff --git a/llvm/include/llvm/Support/CachePruning.h b/llvm/include/llvm/Support/CachePruning.h
index 17e1488..a677a68 100644
--- a/llvm/include/llvm/Support/CachePruning.h
+++ b/llvm/include/llvm/Support/CachePruning.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SUPPORT_CACHEPRUNING_H
#define LLVM_SUPPORT_CACHEPRUNING_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <chrono>
#include <optional>
@@ -66,7 +67,8 @@ struct CachePruningPolicy {
/// For example: "prune_interval=30s:prune_after=24h:cache_size=50%"
/// which means a pruning interval of 30 seconds, expiration time of 24 hours
/// and maximum cache size of 50% of available disk space.
-Expected<CachePruningPolicy> parseCachePruningPolicy(StringRef PolicyStr);
+LLVM_ABI Expected<CachePruningPolicy>
+parseCachePruningPolicy(StringRef PolicyStr);
/// Peform pruning using the supplied policy, returns true if pruning
/// occurred, i.e. if Policy.Interval was expired.
@@ -79,8 +81,9 @@ Expected<CachePruningPolicy> parseCachePruningPolicy(StringRef PolicyStr);
/// As a safeguard against data loss if the user specifies the wrong directory
/// as their cache directory, this function will ignore files not matching the
/// pattern "llvmcache-*".
-bool pruneCache(StringRef Path, CachePruningPolicy Policy,
- const std::vector<std::unique_ptr<MemoryBuffer>> &Files = {});
+LLVM_ABI bool
+pruneCache(StringRef Path, CachePruningPolicy Policy,
+ const std::vector<std::unique_ptr<MemoryBuffer>> &Files = {});
} // namespace llvm
#endif
diff --git a/llvm/include/llvm/Support/Caching.h b/llvm/include/llvm/Support/Caching.h
index 9a82921..7fd9bef 100644
--- a/llvm/include/llvm/Support/Caching.h
+++ b/llvm/include/llvm/Support/Caching.h
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_CACHING_H
#define LLVM_SUPPORT_CACHING_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -113,7 +114,7 @@ using AddBufferFn = std::function<void(unsigned Task, const Twine &ModuleName,
/// done lazily the first time a file is added. The cache name appears in error
/// messages for errors during caching. The temporary file prefix is used in the
/// temporary file naming scheme used when writing files atomically.
-Expected<FileCache> localCache(
+LLVM_ABI Expected<FileCache> localCache(
const Twine &CacheNameRef, const Twine &TempFilePrefixRef,
const Twine &CacheDirectoryPathRef,
AddBufferFn AddBuffer = [](size_t Task, const Twine &ModuleName,
diff --git a/llvm/include/llvm/Support/Chrono.h b/llvm/include/llvm/Support/Chrono.h
index 71859af..5b8102d 100644
--- a/llvm/include/llvm/Support/Chrono.h
+++ b/llvm/include/llvm/Support/Chrono.h
@@ -77,8 +77,8 @@ toTimePoint(std::time_t T, uint32_t nsec) {
} // namespace sys
-raw_ostream &operator<<(raw_ostream &OS, sys::TimePoint<> TP);
-raw_ostream &operator<<(raw_ostream &OS, sys::UtcTime<> TP);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, sys::TimePoint<> TP);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, sys::UtcTime<> TP);
/// Format provider for TimePoint<>
///
@@ -90,25 +90,37 @@ raw_ostream &operator<<(raw_ostream &OS, sys::UtcTime<> TP);
/// If no options are given, the default format is "%Y-%m-%d %H:%M:%S.%N".
template <>
struct format_provider<sys::TimePoint<>> {
- static void format(const sys::TimePoint<> &TP, llvm::raw_ostream &OS,
- StringRef Style);
+ LLVM_ABI static void format(const sys::TimePoint<> &TP, llvm::raw_ostream &OS,
+ StringRef Style);
};
template <> struct format_provider<sys::UtcTime<std::chrono::seconds>> {
- static void format(const sys::UtcTime<std::chrono::seconds> &TP,
- llvm::raw_ostream &OS, StringRef Style);
+ LLVM_ABI static void format(const sys::UtcTime<std::chrono::seconds> &TP,
+ llvm::raw_ostream &OS, StringRef Style);
};
namespace detail {
template <typename Period> struct unit { static const char value[]; };
template <typename Period> const char unit<Period>::value[] = "";
-template <> struct unit<std::ratio<3600>> { static const char value[]; };
-template <> struct unit<std::ratio<60>> { static const char value[]; };
-template <> struct unit<std::ratio<1>> { static const char value[]; };
-template <> struct unit<std::milli> { static const char value[]; };
-template <> struct unit<std::micro> { static const char value[]; };
-template <> struct unit<std::nano> { static const char value[]; };
+template <> struct unit<std::ratio<3600>> {
+ LLVM_ABI static const char value[];
+};
+template <> struct unit<std::ratio<60>> {
+ LLVM_ABI static const char value[];
+};
+template <> struct unit<std::ratio<1>> {
+ LLVM_ABI static const char value[];
+};
+template <> struct unit<std::milli> {
+ LLVM_ABI static const char value[];
+};
+template <> struct unit<std::micro> {
+ LLVM_ABI static const char value[];
+};
+template <> struct unit<std::nano> {
+ LLVM_ABI static const char value[];
+};
} // namespace detail
/// Implementation of format_provider<T> for duration types.
diff --git a/llvm/include/llvm/Support/CommandLine.h b/llvm/include/llvm/Support/CommandLine.h
index 5317505..5489b3ff 100644
--- a/llvm/include/llvm/Support/CommandLine.h
+++ b/llvm/include/llvm/Support/CommandLine.h
@@ -27,6 +27,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"
@@ -65,11 +66,11 @@ namespace cl {
// that give precedence to later occurrences. If your program supports options
// that give precedence to earlier occurrences, you will need to extend this
// function to support it correctly.
-bool ParseCommandLineOptions(int argc, const char *const *argv,
- StringRef Overview = "",
- raw_ostream *Errs = nullptr,
- const char *EnvVar = nullptr,
- bool LongOptionsUseDoubleDash = false);
+LLVM_ABI bool ParseCommandLineOptions(int argc, const char *const *argv,
+ StringRef Overview = "",
+ raw_ostream *Errs = nullptr,
+ const char *EnvVar = nullptr,
+ bool LongOptionsUseDoubleDash = false);
// Function pointer type for printing version information.
using VersionPrinterTy = std::function<void(raw_ostream &)>;
@@ -78,20 +79,20 @@ using VersionPrinterTy = std::function<void(raw_ostream &)>;
/// Override the default (LLVM specific) version printer used to print out the
/// version when --version is given on the command line. This allows other
/// systems using the CommandLine utilities to print their own version string.
-void SetVersionPrinter(VersionPrinterTy func);
+LLVM_ABI void SetVersionPrinter(VersionPrinterTy func);
///===---------------------------------------------------------------------===//
/// Add an extra printer to use in addition to the default one. This can be
/// called multiple times, and each time it adds a new function to the list
/// which will be called after the basic LLVM version printing is complete.
/// Each can then add additional information specific to the tool.
-void AddExtraVersionPrinter(VersionPrinterTy func);
+LLVM_ABI void AddExtraVersionPrinter(VersionPrinterTy func);
// Print option values.
// With -print-options print the difference between option values and defaults.
// With -print-all-options print all option values.
// (Currently not perfect, but best-effort.)
-void PrintOptionValues();
+LLVM_ABI void PrintOptionValues();
// Forward declaration - AddLiteralOption needs to be up here to make gcc happy.
class Option;
@@ -103,7 +104,7 @@ class Option;
///
/// Literal options are used by some parsers to register special option values.
/// This is how the PassNameParser registers pass names for opt.
-void AddLiteralOption(Option &O, StringRef Name);
+LLVM_ABI void AddLiteralOption(Option &O, StringRef Name);
//===----------------------------------------------------------------------===//
// Flags permitted to be passed to command line arguments
@@ -181,7 +182,7 @@ private:
StringRef const Name;
StringRef const Description;
- void registerCategory();
+ LLVM_ABI void registerCategory();
public:
OptionCategory(StringRef const Name,
@@ -195,7 +196,7 @@ public:
};
// The general Option Category (used as default category).
-OptionCategory &getGeneralCategory();
+LLVM_ABI OptionCategory &getGeneralCategory();
//===----------------------------------------------------------------------===//
//
@@ -205,8 +206,8 @@ private:
StringRef Description;
protected:
- void registerSubCommand();
- void unregisterSubCommand();
+ LLVM_ABI void registerSubCommand();
+ LLVM_ABI void unregisterSubCommand();
public:
SubCommand(StringRef Name, StringRef Description = "")
@@ -216,15 +217,15 @@ public:
SubCommand() = default;
// Get the special subcommand representing no subcommand.
- static SubCommand &getTopLevel();
+ LLVM_ABI static SubCommand &getTopLevel();
// Get the special subcommand that can be used to put an option into all
// subcommands.
- static SubCommand &getAll();
+ LLVM_ABI static SubCommand &getAll();
- void reset();
+ LLVM_ABI void reset();
- explicit operator bool() const;
+ LLVM_ABI explicit operator bool() const;
StringRef getName() const { return Name; }
StringRef getDescription() const { return Description; }
@@ -247,7 +248,7 @@ public:
//===----------------------------------------------------------------------===//
//
-class Option {
+class LLVM_ABI Option {
friend class alias;
// Overriden by subclasses to handle the value passed into an argument. Should
@@ -529,7 +530,7 @@ callback(F CB) {
//===----------------------------------------------------------------------===//
// Support value comparison outside the template.
-struct GenericOptionValue {
+struct LLVM_ABI GenericOptionValue {
virtual bool compare(const GenericOptionValue &V) const = 0;
protected:
@@ -636,7 +637,7 @@ struct OptionValue final
// Other safe-to-copy-by-value common option types.
enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };
template <>
-struct OptionValue<cl::boolOrDefault> final
+struct LLVM_ABI OptionValue<cl::boolOrDefault> final
: OptionValueCopy<cl::boolOrDefault> {
using WrapperType = cl::boolOrDefault;
@@ -654,7 +655,7 @@ private:
};
template <>
-struct OptionValue<std::string> final : OptionValueCopy<std::string> {
+struct LLVM_ABI OptionValue<std::string> final : OptionValueCopy<std::string> {
using WrapperType = StringRef;
OptionValue() = default;
@@ -724,7 +725,7 @@ template <typename... OptsTy> ValuesClass values(OptsTy... Options) {
// every instance of the generic parser. This also allows us to put stuff into
// CommandLine.cpp
//
-class generic_parser_base {
+class LLVM_ABI generic_parser_base {
protected:
class GenericOptionInfo {
public:
@@ -890,7 +891,8 @@ public:
//--------------------------------------------------
// Super class of parsers to provide boilerplate code
//
-class basic_parser_impl { // non-template implementation of basic_parser<t>
+class LLVM_ABI
+ basic_parser_impl { // non-template implementation of basic_parser<t>
public:
basic_parser_impl(Option &) {}
@@ -939,9 +941,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<bool>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<bool>;
-template <> class parser<bool> : public basic_parser<bool> {
+template <> class LLVM_ABI parser<bool> : public basic_parser<bool> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -966,9 +968,10 @@ public:
//--------------------------------------------------
-extern template class basic_parser<boolOrDefault>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<boolOrDefault>;
-template <> class parser<boolOrDefault> : public basic_parser<boolOrDefault> {
+template <>
+class LLVM_ABI parser<boolOrDefault> : public basic_parser<boolOrDefault> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -991,9 +994,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<int>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<int>;
-template <> class parser<int> : public basic_parser<int> {
+template <> class LLVM_ABI parser<int> : public basic_parser<int> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1012,9 +1015,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<long>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<long>;
-template <> class parser<long> final : public basic_parser<long> {
+template <> class LLVM_ABI parser<long> final : public basic_parser<long> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1033,9 +1036,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<long long>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<long long>;
-template <> class parser<long long> : public basic_parser<long long> {
+template <> class LLVM_ABI parser<long long> : public basic_parser<long long> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1054,9 +1057,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<unsigned>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<unsigned>;
-template <> class parser<unsigned> : public basic_parser<unsigned> {
+template <> class LLVM_ABI parser<unsigned> : public basic_parser<unsigned> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1075,10 +1078,11 @@ public:
//--------------------------------------------------
-extern template class basic_parser<unsigned long>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<unsigned long>;
template <>
-class parser<unsigned long> final : public basic_parser<unsigned long> {
+class LLVM_ABI parser<unsigned long> final
+ : public basic_parser<unsigned long> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1097,10 +1101,11 @@ public:
//--------------------------------------------------
-extern template class basic_parser<unsigned long long>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<unsigned long long>;
template <>
-class parser<unsigned long long> : public basic_parser<unsigned long long> {
+class LLVM_ABI parser<unsigned long long>
+ : public basic_parser<unsigned long long> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1120,9 +1125,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<double>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<double>;
-template <> class parser<double> : public basic_parser<double> {
+template <> class LLVM_ABI parser<double> : public basic_parser<double> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1141,9 +1146,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<float>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<float>;
-template <> class parser<float> : public basic_parser<float> {
+template <> class LLVM_ABI parser<float> : public basic_parser<float> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1162,9 +1167,10 @@ public:
//--------------------------------------------------
-extern template class basic_parser<std::string>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<std::string>;
-template <> class parser<std::string> : public basic_parser<std::string> {
+template <>
+class LLVM_ABI parser<std::string> : public basic_parser<std::string> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1186,9 +1192,9 @@ public:
//--------------------------------------------------
-extern template class basic_parser<char>;
+extern template class LLVM_TEMPLATE_ABI basic_parser<char>;
-template <> class parser<char> : public basic_parser<char> {
+template <> class LLVM_ABI parser<char> : public basic_parser<char> {
public:
parser(Option &O) : basic_parser(O) {}
@@ -1906,7 +1912,7 @@ public:
// Aliased command line option (alias this name to a preexisting name)
//
-class alias : public Option {
+class LLVM_ABI alias : public Option {
Option *AliasFor;
bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
@@ -1979,27 +1985,27 @@ struct aliasopt {
struct extrahelp {
StringRef morehelp;
- explicit extrahelp(StringRef help);
+ LLVM_ABI explicit extrahelp(StringRef help);
};
-void PrintVersionMessage();
+LLVM_ABI void PrintVersionMessage();
/// This function just prints the help message, exactly the same way as if the
/// -help or -help-hidden option had been given on the command line.
///
/// \param Hidden if true will print hidden options
/// \param Categorized if true print options in categories
-void PrintHelpMessage(bool Hidden = false, bool Categorized = false);
+LLVM_ABI void PrintHelpMessage(bool Hidden = false, bool Categorized = false);
/// An array of optional enabled settings in the LLVM build configuration,
/// which may be of interest to compiler developers. For example, includes
/// "+assertions" if assertions are enabled. Used by printBuildConfig.
-ArrayRef<StringRef> getCompilerBuildConfig();
+LLVM_ABI ArrayRef<StringRef> getCompilerBuildConfig();
/// Prints the compiler build configuration.
/// Designed for compiler developers, not compiler end-users.
/// Intended to be used in --version output when enabled.
-void printBuildConfig(raw_ostream &OS);
+LLVM_ABI void printBuildConfig(raw_ostream &OS);
//===----------------------------------------------------------------------===//
// Public interface for accessing registered options.
@@ -2032,7 +2038,7 @@ void printBuildConfig(raw_ostream &OS);
/// Hopefully this API can be deprecated soon. Any situation where options need
/// to be modified by tools or libraries should be handled by sane APIs rather
/// than just handing around a global list.
-StringMap<Option *> &
+LLVM_ABI StringMap<Option *> &
getRegisteredOptions(SubCommand &Sub = SubCommand::getTopLevel());
/// Use this to get all registered SubCommands from the provided parser.
@@ -2054,7 +2060,7 @@ getRegisteredOptions(SubCommand &Sub = SubCommand::getTopLevel());
///
/// This interface is useful for defining subcommands in libraries and
/// the dispatch from a single point (like in the main function).
-iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
+LLVM_ABI iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
getRegisteredSubcommands();
//===----------------------------------------------------------------------===//
@@ -2073,9 +2079,9 @@ getRegisteredSubcommands();
/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
/// lines and end of the response file to be marked with a nullptr string.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
-void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
- SmallVectorImpl<const char *> &NewArgv,
- bool MarkEOLs = false);
+LLVM_ABI void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
/// Tokenizes a string of Windows command line arguments, which may contain
/// quotes and escaped quotes.
@@ -2091,16 +2097,17 @@ void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
/// lines and end of the response file to be marked with a nullptr string.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
-void TokenizeWindowsCommandLine(StringRef Source, StringSaver &Saver,
- SmallVectorImpl<const char *> &NewArgv,
- bool MarkEOLs = false);
+LLVM_ABI void TokenizeWindowsCommandLine(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
/// Tokenizes a Windows command line while attempting to avoid copies. If no
/// quoting or escaping was used, this produces substrings of the original
/// string. If a token requires unquoting, it will be allocated with the
/// StringSaver.
-void TokenizeWindowsCommandLineNoCopy(StringRef Source, StringSaver &Saver,
- SmallVectorImpl<StringRef> &NewArgv);
+LLVM_ABI void
+TokenizeWindowsCommandLineNoCopy(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<StringRef> &NewArgv);
/// Tokenizes a Windows full command line, including command name at the start.
///
@@ -2115,9 +2122,10 @@ void TokenizeWindowsCommandLineNoCopy(StringRef Source, StringSaver &Saver,
/// if you set MarkEOLs = true, then the first word of every line will be
/// parsed using the special rules for command names, making this function
/// suitable for parsing a file full of commands to execute.
-void TokenizeWindowsCommandLineFull(StringRef Source, StringSaver &Saver,
- SmallVectorImpl<const char *> &NewArgv,
- bool MarkEOLs = false);
+LLVM_ABI void
+TokenizeWindowsCommandLineFull(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
/// String tokenization function type. Should be compatible with either
/// Windows or Unix command line tokenizers.
@@ -2134,9 +2142,9 @@ using TokenizerCallback = void (*)(StringRef Source, StringSaver &Saver,
///
/// It works like TokenizeGNUCommandLine with ability to skip comment lines.
///
-void tokenizeConfigFile(StringRef Source, StringSaver &Saver,
- SmallVectorImpl<const char *> &NewArgv,
- bool MarkEOLs = false);
+LLVM_ABI void tokenizeConfigFile(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
/// Contains options that control response file expansion.
class ExpansionContext {
@@ -2171,7 +2179,7 @@ class ExpansionContext {
SmallVectorImpl<const char *> &NewArgv);
public:
- ExpansionContext(BumpPtrAllocator &A, TokenizerCallback T);
+ LLVM_ABI ExpansionContext(BumpPtrAllocator &A, TokenizerCallback T);
ExpansionContext &setMarkEOLs(bool X) {
MarkEOLs = X;
@@ -2207,7 +2215,8 @@ public:
/// If the specified file name contains a directory separator, it is searched
/// for by its absolute path. Otherwise looks for file sequentially in
/// directories specified by SearchDirs field.
- bool findConfigFile(StringRef FileName, SmallVectorImpl<char> &FilePath);
+ LLVM_ABI bool findConfigFile(StringRef FileName,
+ SmallVectorImpl<char> &FilePath);
/// Reads command line options from the given configuration file.
///
@@ -2219,31 +2228,34 @@ public:
/// commands resolving file names in them relative to the directory where
/// CfgFilename resides. It also expands "<CFGDIR>" to the base path of the
/// current config file.
- Error readConfigFile(StringRef CfgFile, SmallVectorImpl<const char *> &Argv);
+ LLVM_ABI Error readConfigFile(StringRef CfgFile,
+ SmallVectorImpl<const char *> &Argv);
/// Expands constructs "@file" in the provided array of arguments recursively.
- Error expandResponseFiles(SmallVectorImpl<const char *> &Argv);
+ LLVM_ABI Error expandResponseFiles(SmallVectorImpl<const char *> &Argv);
};
/// A convenience helper which concatenates the options specified by the
/// environment variable EnvVar and command line options, then expands
/// response files recursively.
/// \return true if all @files were expanded successfully or there were none.
-bool expandResponseFiles(int Argc, const char *const *Argv, const char *EnvVar,
- SmallVectorImpl<const char *> &NewArgv);
+LLVM_ABI bool expandResponseFiles(int Argc, const char *const *Argv,
+ const char *EnvVar,
+ SmallVectorImpl<const char *> &NewArgv);
/// A convenience helper which supports the typical use case of expansion
/// function call.
-bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
- SmallVectorImpl<const char *> &Argv);
+LLVM_ABI bool ExpandResponseFiles(StringSaver &Saver,
+ TokenizerCallback Tokenizer,
+ SmallVectorImpl<const char *> &Argv);
/// A convenience helper which concatenates the options specified by the
/// environment variable EnvVar and command line options, then expands response
/// files recursively. The tokenizer is a predefined GNU or Windows one.
/// \return true if all @files were expanded successfully or there were none.
-bool expandResponseFiles(int Argc, const char *const *Argv, const char *EnvVar,
- StringSaver &Saver,
- SmallVectorImpl<const char *> &NewArgv);
+LLVM_ABI bool expandResponseFiles(int Argc, const char *const *Argv,
+ const char *EnvVar, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv);
/// Mark all options not part of this category as cl::ReallyHidden.
///
@@ -2252,8 +2264,8 @@ bool expandResponseFiles(int Argc, const char *const *Argv, const char *EnvVar,
/// Some tools (like clang-format) like to be able to hide all options that are
/// not specific to the tool. This function allows a tool to specify a single
/// option category to display in the -help output.
-void HideUnrelatedOptions(cl::OptionCategory &Category,
- SubCommand &Sub = SubCommand::getTopLevel());
+LLVM_ABI void HideUnrelatedOptions(cl::OptionCategory &Category,
+ SubCommand &Sub = SubCommand::getTopLevel());
/// Mark all options not part of the categories as cl::ReallyHidden.
///
@@ -2262,22 +2274,23 @@ void HideUnrelatedOptions(cl::OptionCategory &Category,
/// Some tools (like clang-format) like to be able to hide all options that are
/// not specific to the tool. This function allows a tool to specify a single
/// option category to display in the -help output.
-void HideUnrelatedOptions(ArrayRef<const cl::OptionCategory *> Categories,
- SubCommand &Sub = SubCommand::getTopLevel());
+LLVM_ABI void
+HideUnrelatedOptions(ArrayRef<const cl::OptionCategory *> Categories,
+ SubCommand &Sub = SubCommand::getTopLevel());
/// Reset all command line options to a state that looks as if they have
/// never appeared on the command line. This is useful for being able to parse
/// a command line multiple times (especially useful for writing tests).
-void ResetAllOptionOccurrences();
+LLVM_ABI void ResetAllOptionOccurrences();
/// Reset the command line parser back to its initial state. This
/// removes
/// all options, categories, and subcommands and returns the parser to a state
/// where no options are supported.
-void ResetCommandLineParser();
+LLVM_ABI void ResetCommandLineParser();
/// Parses `Arg` into the option handler `Handler`.
-bool ProvidePositionalOption(Option *Handler, StringRef Arg, int i);
+LLVM_ABI bool ProvidePositionalOption(Option *Handler, StringRef Arg, int i);
} // end namespace cl
diff --git a/llvm/include/llvm/Support/Compression.h b/llvm/include/llvm/Support/Compression.h
index 2a8da9e..246ccbd 100644
--- a/llvm/include/llvm/Support/Compression.h
+++ b/llvm/include/llvm/Support/Compression.h
@@ -14,6 +14,7 @@
#define LLVM_SUPPORT_COMPRESSION_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -38,17 +39,18 @@ constexpr int BestSpeedCompression = 1;
constexpr int DefaultCompression = 6;
constexpr int BestSizeCompression = 9;
-bool isAvailable();
+LLVM_ABI bool isAvailable();
-void compress(ArrayRef<uint8_t> Input,
- SmallVectorImpl<uint8_t> &CompressedBuffer,
- int Level = DefaultCompression);
+LLVM_ABI void compress(ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &CompressedBuffer,
+ int Level = DefaultCompression);
-Error decompress(ArrayRef<uint8_t> Input, uint8_t *Output,
- size_t &UncompressedSize);
+LLVM_ABI Error decompress(ArrayRef<uint8_t> Input, uint8_t *Output,
+ size_t &UncompressedSize);
-Error decompress(ArrayRef<uint8_t> Input, SmallVectorImpl<uint8_t> &Output,
- size_t UncompressedSize);
+LLVM_ABI Error decompress(ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &Output,
+ size_t UncompressedSize);
} // End of namespace zlib
@@ -59,17 +61,18 @@ constexpr int BestSpeedCompression = 1;
constexpr int DefaultCompression = 5;
constexpr int BestSizeCompression = 12;
-bool isAvailable();
+LLVM_ABI bool isAvailable();
-void compress(ArrayRef<uint8_t> Input,
- SmallVectorImpl<uint8_t> &CompressedBuffer,
- int Level = DefaultCompression, bool EnableLdm = false);
+LLVM_ABI void compress(ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &CompressedBuffer,
+ int Level = DefaultCompression, bool EnableLdm = false);
-Error decompress(ArrayRef<uint8_t> Input, uint8_t *Output,
- size_t &UncompressedSize);
+LLVM_ABI Error decompress(ArrayRef<uint8_t> Input, uint8_t *Output,
+ size_t &UncompressedSize);
-Error decompress(ArrayRef<uint8_t> Input, SmallVectorImpl<uint8_t> &Output,
- size_t UncompressedSize);
+LLVM_ABI Error decompress(ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &Output,
+ size_t UncompressedSize);
} // End of namespace zstd
@@ -109,20 +112,22 @@ struct Params {
// Return nullptr if LLVM was built with support (LLVM_ENABLE_ZLIB,
// LLVM_ENABLE_ZSTD) for the specified compression format; otherwise
// return a string literal describing the reason.
-const char *getReasonIfUnsupported(Format F);
+LLVM_ABI const char *getReasonIfUnsupported(Format F);
// Compress Input with the specified format P.Format. If Level is -1, use
// *::DefaultCompression for the format.
-void compress(Params P, ArrayRef<uint8_t> Input,
- SmallVectorImpl<uint8_t> &Output);
+LLVM_ABI void compress(Params P, ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &Output);
// Decompress Input. The uncompressed size must be available.
-Error decompress(DebugCompressionType T, ArrayRef<uint8_t> Input,
- uint8_t *Output, size_t UncompressedSize);
-Error decompress(Format F, ArrayRef<uint8_t> Input,
- SmallVectorImpl<uint8_t> &Output, size_t UncompressedSize);
-Error decompress(DebugCompressionType T, ArrayRef<uint8_t> Input,
- SmallVectorImpl<uint8_t> &Output, size_t UncompressedSize);
+LLVM_ABI Error decompress(DebugCompressionType T, ArrayRef<uint8_t> Input,
+ uint8_t *Output, size_t UncompressedSize);
+LLVM_ABI Error decompress(Format F, ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &Output,
+ size_t UncompressedSize);
+LLVM_ABI Error decompress(DebugCompressionType T, ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &Output,
+ size_t UncompressedSize);
} // End of namespace compression
diff --git a/llvm/include/llvm/Support/ConvertEBCDIC.h b/llvm/include/llvm/Support/ConvertEBCDIC.h
index ea761b3..1ed88b9 100644
--- a/llvm/include/llvm/Support/ConvertEBCDIC.h
+++ b/llvm/include/llvm/Support/ConvertEBCDIC.h
@@ -15,14 +15,15 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include <system_error>
namespace llvm {
namespace ConverterEBCDIC {
-std::error_code convertToEBCDIC(StringRef Source,
- SmallVectorImpl<char> &Result);
+LLVM_ABI std::error_code convertToEBCDIC(StringRef Source,
+ SmallVectorImpl<char> &Result);
-void convertToUTF8(StringRef Source, SmallVectorImpl<char> &Result);
+LLVM_ABI void convertToUTF8(StringRef Source, SmallVectorImpl<char> &Result);
} // namespace ConverterEBCDIC
} // namespace llvm
diff --git a/llvm/include/llvm/Support/ConvertUTF.h b/llvm/include/llvm/Support/ConvertUTF.h
index 25d4617..dd446f2 100644
--- a/llvm/include/llvm/Support/ConvertUTF.h
+++ b/llvm/include/llvm/Support/ConvertUTF.h
@@ -105,6 +105,7 @@
#ifndef LLVM_SUPPORT_CONVERTUTF_H
#define LLVM_SUPPORT_CONVERTUTF_H
+#include "llvm/Support/Compiler.h"
#include <cstddef>
#include <string>
@@ -157,49 +158,64 @@ typedef enum {
lenientConversion
} ConversionFlags;
-ConversionResult ConvertUTF8toUTF16 (
- const UTF8** sourceStart, const UTF8* sourceEnd,
- UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF8toUTF16(const UTF8 **sourceStart,
+ const UTF8 *sourceEnd,
+ UTF16 **targetStart,
+ UTF16 *targetEnd,
+ ConversionFlags flags);
/**
* Convert a partial UTF8 sequence to UTF32. If the sequence ends in an
* incomplete code unit sequence, returns \c sourceExhausted.
*/
-ConversionResult ConvertUTF8toUTF32Partial(
- const UTF8** sourceStart, const UTF8* sourceEnd,
- UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF8toUTF32Partial(const UTF8 **sourceStart,
+ const UTF8 *sourceEnd,
+ UTF32 **targetStart,
+ UTF32 *targetEnd,
+ ConversionFlags flags);
/**
* Convert a partial UTF8 sequence to UTF32. If the sequence ends in an
* incomplete code unit sequence, returns \c sourceIllegal.
*/
-ConversionResult ConvertUTF8toUTF32(
- const UTF8** sourceStart, const UTF8* sourceEnd,
- UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF8toUTF32(const UTF8 **sourceStart,
+ const UTF8 *sourceEnd,
+ UTF32 **targetStart,
+ UTF32 *targetEnd,
+ ConversionFlags flags);
-ConversionResult ConvertUTF16toUTF8 (
- const UTF16** sourceStart, const UTF16* sourceEnd,
- UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF16toUTF8(const UTF16 **sourceStart,
+ const UTF16 *sourceEnd,
+ UTF8 **targetStart,
+ UTF8 *targetEnd,
+ ConversionFlags flags);
-ConversionResult ConvertUTF32toUTF8 (
- const UTF32** sourceStart, const UTF32* sourceEnd,
- UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF32toUTF8(const UTF32 **sourceStart,
+ const UTF32 *sourceEnd,
+ UTF8 **targetStart,
+ UTF8 *targetEnd,
+ ConversionFlags flags);
-ConversionResult ConvertUTF16toUTF32 (
- const UTF16** sourceStart, const UTF16* sourceEnd,
- UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF16toUTF32(const UTF16 **sourceStart,
+ const UTF16 *sourceEnd,
+ UTF32 **targetStart,
+ UTF32 *targetEnd,
+ ConversionFlags flags);
-ConversionResult ConvertUTF32toUTF16 (
- const UTF32** sourceStart, const UTF32* sourceEnd,
- UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+LLVM_ABI ConversionResult ConvertUTF32toUTF16(const UTF32 **sourceStart,
+ const UTF32 *sourceEnd,
+ UTF16 **targetStart,
+ UTF16 *targetEnd,
+ ConversionFlags flags);
-Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
+LLVM_ABI Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
-Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
+LLVM_ABI Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
-unsigned getUTF8SequenceSize(const UTF8 *source, const UTF8 *sourceEnd);
+LLVM_ABI unsigned getUTF8SequenceSize(const UTF8 *source,
+ const UTF8 *sourceEnd);
-unsigned getNumBytesForUTF8(UTF8 firstByte);
+LLVM_ABI unsigned getNumBytesForUTF8(UTF8 firstByte);
/*************************************************************************/
/* Below are LLVM-specific wrappers of the functions above. */
@@ -217,27 +233,27 @@ class StringRef;
* the first character which could not be converted.
* \return true on success.
*/
-bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
- char *&ResultPtr, const UTF8 *&ErrorPtr);
+LLVM_ABI bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
+ char *&ResultPtr, const UTF8 *&ErrorPtr);
/**
* Converts a UTF-8 StringRef to a std::wstring.
* \return true on success.
*/
-bool ConvertUTF8toWide(llvm::StringRef Source, std::wstring &Result);
+LLVM_ABI bool ConvertUTF8toWide(llvm::StringRef Source, std::wstring &Result);
/**
* Converts a UTF-8 C-string to a std::wstring.
* \return true on success.
*/
-bool ConvertUTF8toWide(const char *Source, std::wstring &Result);
+LLVM_ABI bool ConvertUTF8toWide(const char *Source, std::wstring &Result);
/**
* Converts a std::wstring to a UTF-8 encoded std::string.
* \return true on success.
*/
-bool convertWideToUTF8(const std::wstring &Source, std::string &Result);
-
+LLVM_ABI bool convertWideToUTF8(const std::wstring &Source,
+ std::string &Result);
/**
* Convert an Unicode code point to UTF8 sequence.
@@ -249,7 +265,7 @@ bool convertWideToUTF8(const std::wstring &Source, std::string &Result);
*
* \returns true on success.
*/
-bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
+LLVM_ABI bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
/**
* Convert the first UTF8 sequence in the given source buffer to a UTF32
@@ -282,7 +298,7 @@ inline ConversionResult convertUTF8Sequence(const UTF8 **source,
* Returns true if a blob of text starts with a UTF-16 big or little endian byte
* order mark.
*/
-bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);
+LLVM_ABI bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);
/**
* Converts a stream of raw bytes assumed to be UTF16 into a UTF8 std::string.
@@ -291,7 +307,8 @@ bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);
* \param [out] Out Converted UTF-8 is stored here on success.
* \returns true on success
*/
-bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
+LLVM_ABI bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes,
+ std::string &Out);
/**
* Converts a UTF16 string into a UTF8 std::string.
@@ -300,7 +317,7 @@ bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
* \param [out] Out Converted UTF-8 is stored here on success.
* \returns true on success
*/
-bool convertUTF16ToUTF8String(ArrayRef<UTF16> Src, std::string &Out);
+LLVM_ABI bool convertUTF16ToUTF8String(ArrayRef<UTF16> Src, std::string &Out);
/**
* Converts a stream of raw bytes assumed to be UTF32 into a UTF8 std::string.
@@ -309,7 +326,8 @@ bool convertUTF16ToUTF8String(ArrayRef<UTF16> Src, std::string &Out);
* \param [out] Out Converted UTF-8 is stored here on success.
* \returns true on success
*/
-bool convertUTF32ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
+LLVM_ABI bool convertUTF32ToUTF8String(ArrayRef<char> SrcBytes,
+ std::string &Out);
/**
* Converts a UTF32 string into a UTF8 std::string.
@@ -318,27 +336,29 @@ bool convertUTF32ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
* \param [out] Out Converted UTF-8 is stored here on success.
* \returns true on success
*/
-bool convertUTF32ToUTF8String(ArrayRef<UTF32> Src, std::string &Out);
+LLVM_ABI bool convertUTF32ToUTF8String(ArrayRef<UTF32> Src, std::string &Out);
/**
* Converts a UTF-8 string into a UTF-16 string with native endianness.
*
* \returns true on success
*/
-bool convertUTF8ToUTF16String(StringRef SrcUTF8,
- SmallVectorImpl<UTF16> &DstUTF16);
+LLVM_ABI bool convertUTF8ToUTF16String(StringRef SrcUTF8,
+ SmallVectorImpl<UTF16> &DstUTF16);
#if defined(_WIN32)
namespace sys {
namespace windows {
-std::error_code UTF8ToUTF16(StringRef utf8, SmallVectorImpl<wchar_t> &utf16);
+LLVM_ABI std::error_code UTF8ToUTF16(StringRef utf8,
+ SmallVectorImpl<wchar_t> &utf16);
/// Convert to UTF16 from the current code page used in the system
-std::error_code CurCPToUTF16(StringRef utf8, SmallVectorImpl<wchar_t> &utf16);
-std::error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
- SmallVectorImpl<char> &utf8);
+LLVM_ABI std::error_code CurCPToUTF16(StringRef utf8,
+ SmallVectorImpl<wchar_t> &utf16);
+LLVM_ABI std::error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
+ SmallVectorImpl<char> &utf8);
/// Convert from UTF16 to the current code page used in the system
-std::error_code UTF16ToCurCP(const wchar_t *utf16, size_t utf16_len,
- SmallVectorImpl<char> &utf8);
+LLVM_ABI std::error_code UTF16ToCurCP(const wchar_t *utf16, size_t utf16_len,
+ SmallVectorImpl<char> &utf8);
} // namespace windows
} // namespace sys
#endif
diff --git a/llvm/include/llvm/Support/CrashRecoveryContext.h b/llvm/include/llvm/Support/CrashRecoveryContext.h
index 31293d6..773de89 100644
--- a/llvm/include/llvm/Support/CrashRecoveryContext.h
+++ b/llvm/include/llvm/Support/CrashRecoveryContext.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
class CrashRecoveryContextCleanup;
@@ -48,28 +49,28 @@ class CrashRecoveryContext {
CrashRecoveryContextCleanup *head = nullptr;
public:
- CrashRecoveryContext();
- ~CrashRecoveryContext();
+ LLVM_ABI CrashRecoveryContext();
+ LLVM_ABI ~CrashRecoveryContext();
/// Register cleanup handler, which is used when the recovery context is
/// finished.
/// The recovery context owns the handler.
- void registerCleanup(CrashRecoveryContextCleanup *cleanup);
+ LLVM_ABI void registerCleanup(CrashRecoveryContextCleanup *cleanup);
- void unregisterCleanup(CrashRecoveryContextCleanup *cleanup);
+ LLVM_ABI void unregisterCleanup(CrashRecoveryContextCleanup *cleanup);
/// Enable crash recovery.
- static void Enable();
+ LLVM_ABI static void Enable();
/// Disable crash recovery.
- static void Disable();
+ LLVM_ABI static void Disable();
/// Return the active context, if the code is currently executing in a
/// thread which is in a protected context.
- static CrashRecoveryContext *GetCurrent();
+ LLVM_ABI static CrashRecoveryContext *GetCurrent();
/// Return true if the current thread is recovering from a crash.
- static bool isRecoveringFromCrash();
+ LLVM_ABI static bool isRecoveringFromCrash();
/// Execute the provided callback function (with the given arguments) in
/// a protected context.
@@ -78,7 +79,7 @@ public:
/// function crashed (or HandleCrash was called explicitly). Clients should
/// make as little assumptions as possible about the program state when
/// RunSafely has returned false.
- bool RunSafely(function_ref<void()> Fn);
+ LLVM_ABI bool RunSafely(function_ref<void()> Fn);
bool RunSafely(void (*Fn)(void*), void *UserData) {
return RunSafely([&]() { Fn(UserData); });
}
@@ -91,25 +92,26 @@ public:
///
/// On Darwin, if PRIO_DARWIN_BG is set on the calling thread, it will be
/// propagated to the new thread as well.
- bool RunSafelyOnThread(function_ref<void()>, unsigned RequestedStackSize = 0);
+ LLVM_ABI bool RunSafelyOnThread(function_ref<void()>,
+ unsigned RequestedStackSize = 0);
bool RunSafelyOnThread(void (*Fn)(void*), void *UserData,
unsigned RequestedStackSize = 0) {
return RunSafelyOnThread([&]() { Fn(UserData); }, RequestedStackSize);
}
- bool RunSafelyOnNewStack(function_ref<void()>,
- unsigned RequestedStackSize = 0);
+ LLVM_ABI bool RunSafelyOnNewStack(function_ref<void()>,
+ unsigned RequestedStackSize = 0);
/// Explicitly trigger a crash recovery in the current process, and
/// return failure from RunSafely(). This function does not return.
- [[noreturn]] void HandleExit(int RetCode);
+ [[noreturn]] LLVM_ABI void HandleExit(int RetCode);
/// Return true if RetCode indicates that a signal or an exception occurred.
- static bool isCrash(int RetCode);
+ LLVM_ABI static bool isCrash(int RetCode);
/// Throw again a signal or an exception, after it was catched once by a
/// CrashRecoveryContext.
- static bool throwIfCrash(int RetCode);
+ LLVM_ABI static bool throwIfCrash(int RetCode);
/// In case of a crash, this is the crash identifier.
int RetCode = 0;
@@ -127,7 +129,7 @@ public:
///
/// Cleanup handlers are stored in a double list, which is owned and managed by
/// a crash recovery context.
-class CrashRecoveryContextCleanup {
+class LLVM_ABI CrashRecoveryContextCleanup {
protected:
CrashRecoveryContext *context = nullptr;
CrashRecoveryContextCleanup(CrashRecoveryContext *context)
diff --git a/llvm/include/llvm/Support/DJB.h b/llvm/include/llvm/Support/DJB.h
index 8a04a32..f36eaaf4 100644
--- a/llvm/include/llvm/Support/DJB.h
+++ b/llvm/include/llvm/Support/DJB.h
@@ -14,6 +14,7 @@
#define LLVM_SUPPORT_DJB_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -26,7 +27,7 @@ inline uint32_t djbHash(StringRef Buffer, uint32_t H = 5381) {
/// Computes the Bernstein hash after folding the input according to the Dwarf 5
/// standard case folding rules.
-uint32_t caseFoldingDjbHash(StringRef Buffer, uint32_t H = 5381);
+LLVM_ABI uint32_t caseFoldingDjbHash(StringRef Buffer, uint32_t H = 5381);
} // namespace llvm
#endif // LLVM_SUPPORT_DJB_H
diff --git a/llvm/include/llvm/Support/DataExtractor.h b/llvm/include/llvm/Support/DataExtractor.h
index f4f5905..1f7e45d 100644
--- a/llvm/include/llvm/Support/DataExtractor.h
+++ b/llvm/include/llvm/Support/DataExtractor.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_DATAEXTRACTOR_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
@@ -161,7 +162,8 @@ public:
/// pointed to by \a offset_ptr is out of bounds, or if the
/// offset plus the length of the C string is out of bounds,
/// a default-initialized StringRef will be returned.
- StringRef getCStrRef(uint64_t *OffsetPtr, Error *Err = nullptr) const;
+ LLVM_ABI StringRef getCStrRef(uint64_t *OffsetPtr,
+ Error *Err = nullptr) const;
/// Extract a C string (as a StringRef) from the location given by the cursor.
/// In case of an extraction error, or if the cursor is already in an error
@@ -200,8 +202,9 @@ public:
/// pointed to by \a OffsetPtr is out of bounds, or if the
/// offset plus the length of the C string is out of bounds,
/// a default-initialized StringRef will be returned.
- StringRef getFixedLengthString(uint64_t *OffsetPtr,
- uint64_t Length, StringRef TrimChars = {"\0", 1}) const;
+ LLVM_ABI StringRef getFixedLengthString(uint64_t *OffsetPtr, uint64_t Length,
+ StringRef TrimChars = {"\0",
+ 1}) const;
/// Extract a fixed number of bytes from the specified offset.
///
@@ -230,8 +233,8 @@ public:
/// A StringRef for the extracted bytes. If the offset pointed to by
/// \a OffsetPtr is out of bounds, or if the offset plus the length
/// is out of bounds, a default-initialized StringRef will be returned.
- StringRef getBytes(uint64_t *OffsetPtr, uint64_t Length,
- Error *Err = nullptr) const;
+ LLVM_ABI StringRef getBytes(uint64_t *OffsetPtr, uint64_t Length,
+ Error *Err = nullptr) const;
/// Extract a fixed number of bytes from the location given by the cursor. In
/// case of an extraction error, or if the cursor is already in an error
@@ -270,8 +273,8 @@ public:
/// @return
/// The unsigned integer value that was extracted, or zero on
/// failure.
- uint64_t getUnsigned(uint64_t *offset_ptr, uint32_t byte_size,
- Error *Err = nullptr) const;
+ LLVM_ABI uint64_t getUnsigned(uint64_t *offset_ptr, uint32_t byte_size,
+ Error *Err = nullptr) const;
/// Extract an unsigned integer of the given size from the location given by
/// the cursor. In case of an extraction error, or if the cursor is already in
@@ -303,7 +306,7 @@ public:
/// @return
/// The sign extended signed integer value that was extracted,
/// or zero on failure.
- int64_t getSigned(uint64_t *offset_ptr, uint32_t size) const;
+ LLVM_ABI int64_t getSigned(uint64_t *offset_ptr, uint32_t size) const;
//------------------------------------------------------------------
/// Extract an pointer from \a *offset_ptr.
@@ -351,7 +354,7 @@ public:
///
/// @return
/// The extracted uint8_t value.
- uint8_t getU8(uint64_t *offset_ptr, Error *Err = nullptr) const;
+ LLVM_ABI uint8_t getU8(uint64_t *offset_ptr, Error *Err = nullptr) const;
/// Extract a single uint8_t value from the location given by the cursor. In
/// case of an extraction error, or if the cursor is already in an error
@@ -381,13 +384,14 @@ public:
/// @return
/// \a dst if all values were properly extracted and copied,
/// NULL otherise.
- uint8_t *getU8(uint64_t *offset_ptr, uint8_t *dst, uint32_t count) const;
+ LLVM_ABI uint8_t *getU8(uint64_t *offset_ptr, uint8_t *dst,
+ uint32_t count) const;
/// Extract \a Count uint8_t values from the location given by the cursor and
/// store them into the destination buffer. In case of an extraction error, or
/// if the cursor is already in an error state, a nullptr is returned and the
/// destination buffer is left unchanged.
- uint8_t *getU8(Cursor &C, uint8_t *Dst, uint32_t Count) const;
+ LLVM_ABI uint8_t *getU8(Cursor &C, uint8_t *Dst, uint32_t Count) const;
/// Extract \a Count uint8_t values from the location given by the cursor and
/// store them into the destination vector. The vector is resized to fit the
@@ -425,7 +429,7 @@ public:
/// @return
/// The extracted uint16_t value.
//------------------------------------------------------------------
- uint16_t getU16(uint64_t *offset_ptr, Error *Err = nullptr) const;
+ LLVM_ABI uint16_t getU16(uint64_t *offset_ptr, Error *Err = nullptr) const;
/// Extract a single uint16_t value from the location given by the cursor. In
/// case of an extraction error, or if the cursor is already in an error
@@ -455,7 +459,8 @@ public:
/// @return
/// \a dst if all values were properly extracted and copied,
/// NULL otherise.
- uint16_t *getU16(uint64_t *offset_ptr, uint16_t *dst, uint32_t count) const;
+ LLVM_ABI uint16_t *getU16(uint64_t *offset_ptr, uint16_t *dst,
+ uint32_t count) const;
/// Extract a 24-bit unsigned value from \a *offset_ptr and return it
/// in a uint32_t.
@@ -478,7 +483,7 @@ public:
///
/// @return
/// The extracted 24-bit value represented in a uint32_t.
- uint32_t getU24(uint64_t *OffsetPtr, Error *Err = nullptr) const;
+ LLVM_ABI uint32_t getU24(uint64_t *OffsetPtr, Error *Err = nullptr) const;
/// Extract a single 24-bit unsigned value from the location given by the
/// cursor. In case of an extraction error, or if the cursor is already in an
@@ -505,7 +510,7 @@ public:
///
/// @return
/// The extracted uint32_t value.
- uint32_t getU32(uint64_t *offset_ptr, Error *Err = nullptr) const;
+ LLVM_ABI uint32_t getU32(uint64_t *offset_ptr, Error *Err = nullptr) const;
/// Extract a single uint32_t value from the location given by the cursor. In
/// case of an extraction error, or if the cursor is already in an error
@@ -535,7 +540,8 @@ public:
/// @return
/// \a dst if all values were properly extracted and copied,
/// NULL otherise.
- uint32_t *getU32(uint64_t *offset_ptr, uint32_t *dst, uint32_t count) const;
+ LLVM_ABI uint32_t *getU32(uint64_t *offset_ptr, uint32_t *dst,
+ uint32_t count) const;
/// Extract a uint64_t value from \a *offset_ptr.
///
@@ -557,7 +563,7 @@ public:
///
/// @return
/// The extracted uint64_t value.
- uint64_t getU64(uint64_t *offset_ptr, Error *Err = nullptr) const;
+ LLVM_ABI uint64_t getU64(uint64_t *offset_ptr, Error *Err = nullptr) const;
/// Extract a single uint64_t value from the location given by the cursor. In
/// case of an extraction error, or if the cursor is already in an error
@@ -587,7 +593,8 @@ public:
/// @return
/// \a dst if all values were properly extracted and copied,
/// NULL otherise.
- uint64_t *getU64(uint64_t *offset_ptr, uint64_t *dst, uint32_t count) const;
+ LLVM_ABI uint64_t *getU64(uint64_t *offset_ptr, uint64_t *dst,
+ uint32_t count) const;
/// Extract a signed LEB128 value from \a *offset_ptr.
///
@@ -611,7 +618,7 @@ public:
///
/// @return
/// The extracted signed integer value.
- int64_t getSLEB128(uint64_t *OffsetPtr, Error *Err = nullptr) const;
+ LLVM_ABI int64_t getSLEB128(uint64_t *OffsetPtr, Error *Err = nullptr) const;
/// Extract an signed LEB128 value from the location given by the cursor.
/// In case of an extraction error, or if the cursor is already in an error
@@ -640,7 +647,8 @@ public:
///
/// @return
/// The extracted unsigned integer value.
- uint64_t getULEB128(uint64_t *offset_ptr, llvm::Error *Err = nullptr) const;
+ LLVM_ABI uint64_t getULEB128(uint64_t *offset_ptr,
+ llvm::Error *Err = nullptr) const;
/// Extract an unsigned LEB128 value from the location given by the cursor.
/// In case of an extraction error, or if the cursor is already in an error
@@ -649,7 +657,7 @@ public:
/// Advance the Cursor position by the given number of bytes. No-op if the
/// cursor is in an error state.
- void skip(Cursor &C, uint64_t Length) const;
+ LLVM_ABI void skip(Cursor &C, uint64_t Length) const;
/// Return true iff the cursor is at the end of the buffer, regardless of the
/// error state of the cursor. The only way both eof and error states can be
diff --git a/llvm/include/llvm/Support/Debug.h b/llvm/include/llvm/Support/Debug.h
index 23c54ac..924d7b2 100644
--- a/llvm/include/llvm/Support/Debug.h
+++ b/llvm/include/llvm/Support/Debug.h
@@ -28,6 +28,8 @@
#ifndef LLVM_SUPPORT_DEBUG_H
#define LLVM_SUPPORT_DEBUG_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class raw_ostream;
@@ -81,7 +83,7 @@ void setCurrentDebugTypes(const char **Types, unsigned Count);
/// is specified. This should probably not be referenced directly, instead, use
/// the DEBUG macro below.
///
-extern bool DebugFlag;
+LLVM_ABI extern bool DebugFlag;
/// EnableDebugBuffering - This defaults to false. If true, the debug
/// stream will install signal handlers to dump any buffered debug
@@ -89,12 +91,12 @@ extern bool DebugFlag;
/// to install signal handlers if they are certain there will be no
/// conflict.
///
-extern bool EnableDebugBuffering;
+LLVM_ABI extern bool EnableDebugBuffering;
/// dbgs() - This returns a reference to a raw_ostream for debugging
/// messages. If debugging is disabled it returns errs(). Use it
/// like: dbgs() << "foo" << "bar";
-raw_ostream &dbgs();
+LLVM_ABI raw_ostream &dbgs();
// DEBUG macro - This macro should be used by passes to emit debug information.
// If the '-debug' option is specified on the commandline, and if this is a
diff --git a/llvm/include/llvm/Support/DebugCounter.h b/llvm/include/llvm/Support/DebugCounter.h
index 8e9dc29..529a9f8 100644
--- a/llvm/include/llvm/Support/DebugCounter.h
+++ b/llvm/include/llvm/Support/DebugCounter.h
@@ -46,6 +46,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include <string>
@@ -58,21 +59,21 @@ public:
struct Chunk {
int64_t Begin;
int64_t End;
- void print(llvm::raw_ostream &OS);
+ LLVM_ABI void print(llvm::raw_ostream &OS);
bool contains(int64_t Idx) { return Idx >= Begin && Idx <= End; }
};
- static void printChunks(raw_ostream &OS, ArrayRef<Chunk>);
+ LLVM_ABI static void printChunks(raw_ostream &OS, ArrayRef<Chunk>);
/// Return true on parsing error and print the error message on the
/// llvm::errs()
- static bool parseChunks(StringRef Str, SmallVector<Chunk> &Res);
+ LLVM_ABI static bool parseChunks(StringRef Str, SmallVector<Chunk> &Res);
/// Returns a reference to the singleton instance.
- static DebugCounter &instance();
+ LLVM_ABI static DebugCounter &instance();
// Used by the command line option parser to push a new value it parsed.
- void push_back(const std::string &);
+ LLVM_ABI void push_back(const std::string &);
// Register a counter with the specified name.
//
@@ -82,7 +83,7 @@ public:
static unsigned registerCounter(StringRef Name, StringRef Desc) {
return instance().addCounter(std::string(Name), std::string(Desc));
}
- static bool shouldExecuteImpl(unsigned CounterName);
+ LLVM_ABI static bool shouldExecuteImpl(unsigned CounterName);
inline static bool shouldExecute(unsigned CounterName) {
if (!isCountingEnabled())
@@ -119,9 +120,9 @@ public:
}
// Dump or print the current counter set into llvm::dbgs().
- LLVM_DUMP_METHOD void dump() const;
+ LLVM_ABI LLVM_DUMP_METHOD void dump() const;
- void print(raw_ostream &OS) const;
+ LLVM_ABI void print(raw_ostream &OS) const;
// Get the counter ID for a given named counter, or return 0 if none is found.
unsigned getCounterId(const std::string &Name) const {
diff --git a/llvm/include/llvm/Support/DivisionByConstantInfo.h b/llvm/include/llvm/Support/DivisionByConstantInfo.h
index caa0b35..cd7a10f 100644
--- a/llvm/include/llvm/Support/DivisionByConstantInfo.h
+++ b/llvm/include/llvm/Support/DivisionByConstantInfo.h
@@ -14,19 +14,20 @@
#define LLVM_SUPPORT_DIVISIONBYCONSTANTINFO_H
#include "llvm/ADT/APInt.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
/// Magic data for optimising signed division by a constant.
struct SignedDivisionByConstantInfo {
- static SignedDivisionByConstantInfo get(const APInt &D);
+ LLVM_ABI static SignedDivisionByConstantInfo get(const APInt &D);
APInt Magic; ///< magic number
unsigned ShiftAmount; ///< shift amount
};
/// Magic data for optimising unsigned division by a constant.
struct UnsignedDivisionByConstantInfo {
- static UnsignedDivisionByConstantInfo
+ LLVM_ABI static UnsignedDivisionByConstantInfo
get(const APInt &D, unsigned LeadingZeros = 0,
bool AllowEvenDivisorOptimization = true);
APInt Magic; ///< magic number
diff --git a/llvm/include/llvm/Support/DynamicLibrary.h b/llvm/include/llvm/Support/DynamicLibrary.h
index 94ee08c..4501bc9 100644
--- a/llvm/include/llvm/Support/DynamicLibrary.h
+++ b/llvm/include/llvm/Support/DynamicLibrary.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_DYNAMICLIBRARY_H
#define LLVM_SUPPORT_DYNAMICLIBRARY_H
+#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
@@ -34,7 +35,7 @@ class DynamicLibrary {
// Placeholder whose address represents an invalid library.
// We use this instead of NULL or a pointer-int pair because the OS library
// might define 0 or 1 to be "special" handles, such as "search all".
- static char Invalid;
+ LLVM_ABI static char Invalid;
// Opaque data used to interface with OS-specific dynamic library handling.
void *Data;
@@ -54,7 +55,7 @@ public:
/// Use isValid() to distinguish these cases if it is important.
/// Note that this will \e not search symbols explicitly registered by
/// AddSymbol().
- void *getAddressOfSymbol(const char *symbolName);
+ LLVM_ABI void *getAddressOfSymbol(const char *symbolName);
/// This function permanently loads the dynamic library at the given path
/// using the library load operation from the host operating system. The
@@ -67,16 +68,16 @@ public:
///
/// It is safe to call this function multiple times for the same library.
/// Open a dynamic library permanently.
- static DynamicLibrary getPermanentLibrary(const char *filename,
- std::string *errMsg = nullptr);
+ LLVM_ABI static DynamicLibrary
+ getPermanentLibrary(const char *filename, std::string *errMsg = nullptr);
/// Registers an externally loaded library. The library will be unloaded
/// when the program terminates.
///
/// It is safe to call this function multiple times for the same library,
/// though ownership is only taken if there was no error.
- static DynamicLibrary addPermanentLibrary(void *handle,
- std::string *errMsg = nullptr);
+ LLVM_ABI static DynamicLibrary
+ addPermanentLibrary(void *handle, std::string *errMsg = nullptr);
/// This function permanently loads the dynamic library at the given path.
/// Use this instead of getPermanentLibrary() when you won't need to get
@@ -98,8 +99,8 @@ public:
/// library fails to load.
///
/// It is safe to call this function multiple times for the same library.
- static DynamicLibrary getLibrary(const char *FileName,
- std::string *Err = nullptr);
+ LLVM_ABI static DynamicLibrary getLibrary(const char *FileName,
+ std::string *Err = nullptr);
/// This function closes the dynamic library at the given path, using the
/// library close operation of the host operating system, and there is no
@@ -107,7 +108,7 @@ public:
///
/// This function should be called only if the library was loaded using the
/// getLibrary() function.
- static void closeLibrary(DynamicLibrary &Lib);
+ LLVM_ABI static void closeLibrary(DynamicLibrary &Lib);
enum SearchOrdering {
/// SO_Linker - Search as a call to dlsym(dlopen(NULL)) would when
@@ -123,7 +124,7 @@ public:
/// The default bahaviour is to search loaded libraries in reverse.
SO_LoadOrder = 4
};
- static SearchOrdering SearchOrder; // = SO_Linker
+ LLVM_ABI static SearchOrdering SearchOrder; // = SO_Linker
/// This function will search through all previously loaded dynamic
/// libraries for the symbol \p symbolName. If it is found, the address of
@@ -132,7 +133,7 @@ public:
/// as explicitly registered symbols (AddSymbol()).
/// @throws std::string on error.
/// Search through libraries for address of a symbol
- static void *SearchForAddressOfSymbol(const char *symbolName);
+ LLVM_ABI static void *SearchForAddressOfSymbol(const char *symbolName);
/// Convenience function for C++ophiles.
static void *SearchForAddressOfSymbol(const std::string &symbolName) {
@@ -143,7 +144,7 @@ public:
/// value \p symbolValue. These symbols are searched before any
/// libraries.
/// Add searchable symbol/value pair.
- static void AddSymbol(StringRef symbolName, void *symbolValue);
+ LLVM_ABI static void AddSymbol(StringRef symbolName, void *symbolValue);
class HandleSet;
};
diff --git a/llvm/include/llvm/Support/ELFAttrParserCompact.h b/llvm/include/llvm/Support/ELFAttrParserCompact.h
index 34f740b..e687483 100644
--- a/llvm/include/llvm/Support/ELFAttrParserCompact.h
+++ b/llvm/include/llvm/Support/ELFAttrParserCompact.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_ELFCOMPACTATTRPARSER_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/ELFAttributeParser.h"
#include "llvm/Support/ELFAttributes.h"
@@ -22,7 +23,7 @@ namespace llvm {
class StringRef;
class ScopedPrinter;
-class ELFCompactAttrParser : public ELFAttributeParser {
+class LLVM_ABI ELFCompactAttrParser : public ELFAttributeParser {
StringRef vendor;
std::unordered_map<unsigned, unsigned> attributes;
std::unordered_map<unsigned, StringRef> attributesStr;
diff --git a/llvm/include/llvm/Support/ELFAttrParserExtended.h b/llvm/include/llvm/Support/ELFAttrParserExtended.h
index 68f45fb..1da6665 100644
--- a/llvm/include/llvm/Support/ELFAttrParserExtended.h
+++ b/llvm/include/llvm/Support/ELFAttrParserExtended.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_ELFEXTENDEDATTRPARSER_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/ELFAttributeParser.h"
#include "llvm/Support/ELFAttributes.h"
@@ -21,7 +22,7 @@ namespace llvm {
class StringRef;
class ScopedPrinter;
-class ELFExtendedAttrParser : public ELFAttributeParser {
+class LLVM_ABI ELFExtendedAttrParser : public ELFAttributeParser {
protected:
ScopedPrinter *Sw;
DataExtractor De{ArrayRef<uint8_t>{}, true, 0};
diff --git a/llvm/include/llvm/Support/ELFAttributes.h b/llvm/include/llvm/Support/ELFAttributes.h
index d652e25..270246f 100644
--- a/llvm/include/llvm/Support/ELFAttributes.h
+++ b/llvm/include/llvm/Support/ELFAttributes.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include <optional>
namespace llvm {
@@ -55,9 +56,10 @@ namespace ELFAttrs {
enum AttrType : unsigned { File = 1, Section = 2, Symbol = 3 };
-StringRef attrTypeAsString(unsigned attr, TagNameMap tagNameMap,
- bool hasTagPrefix = true);
-std::optional<unsigned> attrTypeFromString(StringRef tag, TagNameMap tagNameMap);
+LLVM_ABI StringRef attrTypeAsString(unsigned attr, TagNameMap tagNameMap,
+ bool hasTagPrefix = true);
+LLVM_ABI std::optional<unsigned> attrTypeFromString(StringRef tag,
+ TagNameMap tagNameMap);
// Magic numbers for ELF attributes.
enum AttrMagic { Format_Version = 0x41 };
diff --git a/llvm/include/llvm/Support/Errno.h b/llvm/include/llvm/Support/Errno.h
index e095c66..6481725 100644
--- a/llvm/include/llvm/Support/Errno.h
+++ b/llvm/include/llvm/Support/Errno.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_ERRNO_H
#define LLVM_SUPPORT_ERRNO_H
+#include "llvm/Support/Compiler.h"
#include <cerrno>
#include <string>
@@ -23,10 +24,10 @@ namespace sys {
/// thread-safe variant of strerror() is available. Be sure to call this
/// immediately after the function that set errno, or errno may have been
/// overwritten by an intervening call.
-std::string StrError();
+LLVM_ABI std::string StrError();
/// Like the no-argument version above, but uses \p errnum instead of errno.
-std::string StrError(int errnum);
+LLVM_ABI std::string StrError(int errnum);
template <typename FailT, typename Fun, typename... Args>
inline decltype(auto) RetryAfterSignal(const FailT &Fail, const Fun &F,
diff --git a/llvm/include/llvm/Support/Error.h b/llvm/include/llvm/Support/Error.h
index 43deccb..b0bcdd5 100644
--- a/llvm/include/llvm/Support/Error.h
+++ b/llvm/include/llvm/Support/Error.h
@@ -41,7 +41,7 @@ class ErrorSuccess;
/// Base class for error info classes. Do not extend this directly: Extend
/// the ErrorInfo template subclass instead.
-class ErrorInfoBase {
+class LLVM_ABI ErrorInfoBase {
public:
virtual ~ErrorInfoBase() = default;
@@ -262,7 +262,7 @@ private:
// of debug prints can cause the function to be too large for inlining. So
// it's important that we define this function out of line so that it can't be
// inlined.
- [[noreturn]] void fatalUncheckedError() const;
+ [[noreturn]] LLVM_ABI void fatalUncheckedError() const;
#endif
void assertIsChecked() {
@@ -366,7 +366,7 @@ public:
/// Special ErrorInfo subclass representing a list of ErrorInfos.
/// Instances of this class are constructed by joinError.
-class ErrorList final : public ErrorInfo<ErrorList> {
+class LLVM_ABI ErrorList final : public ErrorInfo<ErrorList> {
// handleErrors needs to be able to iterate the payload list of an
// ErrorList.
template <typename... HandlerTs>
@@ -402,6 +402,10 @@ private:
Payloads.push_back(std::move(Payload2));
}
+ // Explicitly non-copyable.
+ ErrorList(ErrorList const &) = delete;
+ ErrorList &operator=(ErrorList const &) = delete;
+
static Error join(Error E1, Error E2) {
if (!E1)
return E2;
@@ -737,14 +741,15 @@ private:
/// @deprecated Use reportFatalInternalError() or reportFatalUsageError()
/// instead.
-[[noreturn]] void report_fatal_error(Error Err, bool gen_crash_diag = true);
+[[noreturn]] LLVM_ABI void report_fatal_error(Error Err,
+ bool gen_crash_diag = true);
/// Report a fatal error that indicates a bug in LLVM.
/// See ErrorHandling.h for details.
-[[noreturn]] void reportFatalInternalError(Error Err);
+[[noreturn]] LLVM_ABI void reportFatalInternalError(Error Err);
/// Report a fatal error that does not indicate a bug in LLVM.
/// See ErrorHandling.h for details.
-[[noreturn]] void reportFatalUsageError(Error Err);
+[[noreturn]] LLVM_ABI void reportFatalUsageError(Error Err);
/// Report a fatal error if Err is a failure value.
///
@@ -1055,15 +1060,16 @@ Expected<T> handleExpected(Expected<T> ValOrErr, RecoveryFtor &&RecoveryPath,
/// This is useful in the base level of your program to allow clean termination
/// (allowing clean deallocation of resources, etc.), while reporting error
/// information to the user.
-void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner = {});
+LLVM_ABI void logAllUnhandledErrors(Error E, raw_ostream &OS,
+ Twine ErrorBanner = {});
/// Write all error messages (if any) in E to a string. The newline character
/// is used to separate error messages.
-std::string toString(Error E);
+LLVM_ABI std::string toString(Error E);
/// Like toString(), but does not consume the error. This can be used to print
/// a warning while retaining the original error object.
-std::string toStringWithoutConsuming(const Error &E);
+LLVM_ABI std::string toStringWithoutConsuming(const Error &E);
/// Consume a Error without doing anything. This method should be used
/// only where an error can be considered a reasonable and expected return
@@ -1182,8 +1188,8 @@ private:
/// This is useful if you're writing an interface that returns a Error
/// (or Expected) and you want to call code that still returns
/// std::error_codes.
-class ECError : public ErrorInfo<ECError> {
- friend Error errorCodeToError(std::error_code);
+class LLVM_ABI ECError : public ErrorInfo<ECError> {
+ LLVM_ABI_FRIEND friend Error errorCodeToError(std::error_code);
void anchor() override;
@@ -1208,16 +1214,16 @@ protected:
/// sensible conversion to std::error_code is available, as attempts to convert
/// to/from this error will result in a fatal error. (i.e. it is a programmatic
/// error to try to convert such a value).
-std::error_code inconvertibleErrorCode();
+LLVM_ABI std::error_code inconvertibleErrorCode();
/// Helper for converting an std::error_code to a Error.
-Error errorCodeToError(std::error_code EC);
+LLVM_ABI Error errorCodeToError(std::error_code EC);
/// Helper for converting an ECError to a std::error_code.
///
/// This method requires that Err be Error() or an ECError, otherwise it
/// will trigger a call to abort().
-std::error_code errorToErrorCode(Error Err);
+LLVM_ABI std::error_code errorToErrorCode(Error Err);
/// Helper to get errno as an std::error_code.
///
@@ -1271,7 +1277,7 @@ template <typename T> ErrorOr<T> expectedToErrorOr(Expected<T> &&E) {
/// }
/// @endcode
///
-class StringError : public ErrorInfo<StringError> {
+class LLVM_ABI StringError : public ErrorInfo<StringError> {
public:
static char ID;
@@ -1301,7 +1307,7 @@ inline Error createStringError(std::error_code EC, char const *Fmt,
return make_error<StringError>(Buffer, EC);
}
-Error createStringError(std::string &&Msg, std::error_code EC);
+LLVM_ABI Error createStringError(std::string &&Msg, std::error_code EC);
inline Error createStringError(std::error_code EC, const char *S) {
return createStringError(std::string(S), EC);
@@ -1331,7 +1337,7 @@ inline Error createStringError(std::errc EC, char const *Fmt,
///
/// In some cases, an error needs to live along a 'source' name, in order to
/// show more detailed information to the user.
-class FileError final : public ErrorInfo<FileError> {
+class LLVM_ABI FileError final : public ErrorInfo<FileError> {
friend Error createFileError(const Twine &, Error);
friend Error createFileError(const Twine &, size_t, Error);
diff --git a/llvm/include/llvm/Support/ErrorHandling.h b/llvm/include/llvm/Support/ErrorHandling.h
index d66993b5..4c17b6e8 100644
--- a/llvm/include/llvm/Support/ErrorHandling.h
+++ b/llvm/include/llvm/Support/ErrorHandling.h
@@ -40,11 +40,11 @@ typedef void (*fatal_error_handler_t)(void *user_data, const char *reason,
///
/// \param user_data - An argument which will be passed to the install error
/// handler.
-void install_fatal_error_handler(fatal_error_handler_t handler,
- void *user_data = nullptr);
+LLVM_ABI void install_fatal_error_handler(fatal_error_handler_t handler,
+ void *user_data = nullptr);
/// Restores default error handling behaviour.
-void remove_fatal_error_handler();
+LLVM_ABI void remove_fatal_error_handler();
/// ScopedFatalErrorHandler - This is a simple helper class which just
/// calls install_fatal_error_handler in its constructor and
@@ -60,12 +60,12 @@ struct ScopedFatalErrorHandler {
/// @deprecated Use reportFatalInternalError() or reportFatalUsageError()
/// instead.
-[[noreturn]] void report_fatal_error(const char *reason,
- bool gen_crash_diag = true);
-[[noreturn]] void report_fatal_error(StringRef reason,
- bool gen_crash_diag = true);
-[[noreturn]] void report_fatal_error(const Twine &reason,
- bool gen_crash_diag = true);
+[[noreturn]] LLVM_ABI void report_fatal_error(const char *reason,
+ bool gen_crash_diag = true);
+[[noreturn]] LLVM_ABI void report_fatal_error(StringRef reason,
+ bool gen_crash_diag = true);
+[[noreturn]] LLVM_ABI void report_fatal_error(const Twine &reason,
+ bool gen_crash_diag = true);
/// Report a fatal error that likely indicates a bug in LLVM. It serves a
/// similar purpose as an assertion, but is always enabled, regardless of the
@@ -74,9 +74,9 @@ struct ScopedFatalErrorHandler {
/// This will call installed error handlers (or print the message by default)
/// and then abort. This will produce a crash trace and *will* ask users to
/// report an LLVM bug.
-[[noreturn]] void reportFatalInternalError(const char *reason);
-[[noreturn]] void reportFatalInternalError(StringRef reason);
-[[noreturn]] void reportFatalInternalError(const Twine &reason);
+[[noreturn]] LLVM_ABI void reportFatalInternalError(const char *reason);
+[[noreturn]] LLVM_ABI void reportFatalInternalError(StringRef reason);
+[[noreturn]] LLVM_ABI void reportFatalInternalError(const Twine &reason);
/// Report a fatal error that does not indicate a bug in LLVM.
///
@@ -92,9 +92,9 @@ struct ScopedFatalErrorHandler {
/// This will call installed error handlers (or print the message by default)
/// and then exit with code 1. It will not produce a crash trace and will
/// *not* ask users to report an LLVM bug.
-[[noreturn]] void reportFatalUsageError(const char *reason);
-[[noreturn]] void reportFatalUsageError(StringRef reason);
-[[noreturn]] void reportFatalUsageError(const Twine &reason);
+[[noreturn]] LLVM_ABI void reportFatalUsageError(const char *reason);
+[[noreturn]] LLVM_ABI void reportFatalUsageError(StringRef reason);
+[[noreturn]] LLVM_ABI void reportFatalUsageError(const Twine &reason);
/// Installs a new bad alloc error handler that should be used whenever a
/// bad alloc error, e.g. failing malloc/calloc, is encountered by LLVM.
@@ -112,13 +112,13 @@ struct ScopedFatalErrorHandler {
///
/// \param user_data - An argument which will be passed to the installed error
/// handler.
-void install_bad_alloc_error_handler(fatal_error_handler_t handler,
- void *user_data = nullptr);
+LLVM_ABI void install_bad_alloc_error_handler(fatal_error_handler_t handler,
+ void *user_data = nullptr);
/// Restores default bad alloc error handling behavior.
-void remove_bad_alloc_error_handler();
+LLVM_ABI void remove_bad_alloc_error_handler();
-void install_out_of_memory_new_handler();
+LLVM_ABI void install_out_of_memory_new_handler();
/// Reports a bad alloc error, calling any user defined bad alloc
/// error handler. In contrast to the generic 'report_fatal_error'
@@ -132,15 +132,15 @@ void install_out_of_memory_new_handler();
/// If no error handler is installed (default), throws a bad_alloc exception
/// if LLVM is compiled with exception support. Otherwise prints the error
/// to standard error and calls abort().
-[[noreturn]] void report_bad_alloc_error(const char *Reason,
- bool GenCrashDiag = true);
+[[noreturn]] LLVM_ABI void report_bad_alloc_error(const char *Reason,
+ bool GenCrashDiag = true);
/// This function calls abort(), and prints the optional message to stderr.
/// Use the llvm_unreachable macro (that adds location info), instead of
/// calling this function directly.
-[[noreturn]] void llvm_unreachable_internal(const char *msg = nullptr,
- const char *file = nullptr,
- unsigned line = 0);
+[[noreturn]] LLVM_ABI void llvm_unreachable_internal(const char *msg = nullptr,
+ const char *file = nullptr,
+ unsigned line = 0);
} // namespace llvm
/// Marks that the current location is not supposed to be reachable.
diff --git a/llvm/include/llvm/Support/ExponentialBackoff.h b/llvm/include/llvm/Support/ExponentialBackoff.h
index 8208a74..5269657 100644
--- a/llvm/include/llvm/Support/ExponentialBackoff.h
+++ b/llvm/include/llvm/Support/ExponentialBackoff.h
@@ -13,6 +13,7 @@
#define LLVM_EXPONENTIALBACKOFF_H
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include <chrono>
#include <random>
@@ -50,7 +51,7 @@ public:
/// Blocks while waiting for the next attempt.
/// \returns true if you should try again, false if the timeout has been
/// reached.
- bool waitForNextAttempt();
+ LLVM_ABI bool waitForNextAttempt();
private:
duration MinWait;
diff --git a/llvm/include/llvm/Support/ExtensibleRTTI.h b/llvm/include/llvm/Support/ExtensibleRTTI.h
index e11e3bd..13c3f49 100644
--- a/llvm/include/llvm/Support/ExtensibleRTTI.h
+++ b/llvm/include/llvm/Support/ExtensibleRTTI.h
@@ -60,13 +60,15 @@
#ifndef LLVM_SUPPORT_EXTENSIBLERTTI_H
#define LLVM_SUPPORT_EXTENSIBLERTTI_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
/// Base class for the extensible RTTI hierarchy.
///
/// This class defines virtual methods, dynamicClassID and isA, that enable
/// type comparisons.
-class RTTIRoot {
+class LLVM_ABI RTTIRoot {
public:
virtual ~RTTIRoot() = default;
diff --git a/llvm/include/llvm/Support/FileCollector.h b/llvm/include/llvm/Support/FileCollector.h
index 232dc86..b00bf31 100644
--- a/llvm/include/llvm/Support/FileCollector.h
+++ b/llvm/include/llvm/Support/FileCollector.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <mutex>
#include <string>
@@ -19,7 +20,7 @@ namespace llvm {
class FileCollectorFileSystem;
class Twine;
-class FileCollectorBase {
+class LLVM_ABI FileCollectorBase {
public:
FileCollectorBase();
virtual ~FileCollectorBase();
@@ -66,7 +67,7 @@ protected:
///
/// In order to preserve the relative topology of files we use their real paths
/// as relative paths inside of the Root.
-class FileCollector : public FileCollectorBase {
+class LLVM_ABI FileCollector : public FileCollectorBase {
public:
/// Helper utility that encapsulates the logic for canonicalizing a virtual
/// path and a path to copy from.
@@ -78,7 +79,7 @@ public:
};
/// Canonicalize a pair of virtual and real paths.
- PathStorage canonicalize(StringRef SrcPath);
+ LLVM_ABI PathStorage canonicalize(StringRef SrcPath);
private:
/// Replace with a (mostly) real path, or don't modify. Resolves symlinks
diff --git a/llvm/include/llvm/Support/FileOutputBuffer.h b/llvm/include/llvm/Support/FileOutputBuffer.h
index d4b7352..d5b73152 100644
--- a/llvm/include/llvm/Support/FileOutputBuffer.h
+++ b/llvm/include/llvm/Support/FileOutputBuffer.h
@@ -14,6 +14,7 @@
#define LLVM_SUPPORT_FILEOUTPUTBUFFER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
@@ -45,7 +46,7 @@ public:
/// Otherwise, the file shrinks or grows as necessary based on the value of
/// \p Size. It is an error to specify F_modify and Size=-1 if \p FilePath
/// does not exist.
- static Expected<std::unique_ptr<FileOutputBuffer>>
+ LLVM_ABI static Expected<std::unique_ptr<FileOutputBuffer>>
create(StringRef FilePath, size_t Size, unsigned Flags = 0);
/// Returns a pointer to the start of the buffer.
diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h
index 245e4a24..ae4a212 100644
--- a/llvm/include/llvm/Support/FileSystem.h
+++ b/llvm/include/llvm/Support/FileSystem.h
@@ -31,6 +31,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Chrono.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
@@ -55,7 +56,7 @@ using file_t = void *;
using file_t = int;
#endif
-extern const file_t kInvalidFile;
+LLVM_ABI extern const file_t kInvalidFile;
/// An enumeration for the file system's view of the type.
enum class file_type {
@@ -183,7 +184,7 @@ public:
///
/// Also see comments on \c getLastModificationTime() related to the precision
/// of the returned value.
- TimePoint<> getLastAccessedTime() const;
+ LLVM_ABI TimePoint<> getLastAccessedTime() const;
/// The file modification time as reported from the underlying file system.
///
@@ -192,13 +193,13 @@ public:
/// There is no guarantee for what kind of resolution you can expect, the
/// resolution can differ across platforms and even across mountpoints on the
/// same machine.
- TimePoint<> getLastModificationTime() const;
+ LLVM_ABI TimePoint<> getLastModificationTime() const;
- #if defined(LLVM_ON_UNIX)
+#if defined(LLVM_ON_UNIX)
uint32_t getUser() const { return fs_st_uid; }
uint32_t getGroup() const { return fs_st_gid; }
uint64_t getSize() const { return fs_st_size; }
- #elif defined (_WIN32)
+#elif defined(_WIN32)
uint32_t getUser() const {
return 9999; // Not applicable to Windows, so...
}
@@ -210,7 +211,7 @@ public:
uint64_t getSize() const {
return (uint64_t(FileSizeHigh) << 32) + FileSizeLow;
}
- #endif
+#endif
// setters
void type(file_type v) { Type = v; }
@@ -219,17 +220,17 @@ public:
/// Represents the result of a call to sys::fs::status().
class file_status : public basic_file_status {
- friend bool equivalent(file_status A, file_status B);
+ LLVM_ABI_FRIEND friend bool equivalent(file_status A, file_status B);
- #if defined(LLVM_ON_UNIX)
+#if defined(LLVM_ON_UNIX)
dev_t fs_st_dev = 0;
nlink_t fs_st_nlinks = 0;
ino_t fs_st_ino = 0;
- #elif defined (_WIN32)
+#elif defined(_WIN32)
uint32_t NumLinks = 0;
uint32_t VolumeSerialNumber = 0;
uint64_t PathHash = 0;
- #endif
+#endif
public:
file_status() = default;
@@ -257,8 +258,8 @@ public:
PathHash(PathHash) {}
#endif
- UniqueID getUniqueID() const;
- uint32_t getLinkCount() const;
+ LLVM_ABI UniqueID getUniqueID() const;
+ LLVM_ABI uint32_t getLinkCount() const;
};
/// @}
@@ -274,7 +275,8 @@ public:
/// relative/../path => <current-directory>/relative/../path
///
/// @param path A path that is modified to be an absolute path.
-void make_absolute(const Twine &current_directory, SmallVectorImpl<char> &path);
+LLVM_ABI void make_absolute(const Twine &current_directory,
+ SmallVectorImpl<char> &path);
/// Make \a path an absolute path.
///
@@ -287,7 +289,7 @@ void make_absolute(const Twine &current_directory, SmallVectorImpl<char> &path);
/// @param path A path that is modified to be an absolute path.
/// @returns errc::success if \a path has been made absolute, otherwise a
/// platform-specific error_code.
-std::error_code make_absolute(SmallVectorImpl<char> &path);
+LLVM_ABI std::error_code make_absolute(SmallVectorImpl<char> &path);
/// Create all the non-existent directories in path.
///
@@ -295,9 +297,9 @@ std::error_code make_absolute(SmallVectorImpl<char> &path);
/// @returns errc::success if is_directory(path), otherwise a platform
/// specific error_code. If IgnoreExisting is false, also returns
/// error if the directory already existed.
-std::error_code create_directories(const Twine &path,
- bool IgnoreExisting = true,
- perms Perms = owner_all | group_all);
+LLVM_ABI std::error_code
+create_directories(const Twine &path, bool IgnoreExisting = true,
+ perms Perms = owner_all | group_all);
/// Create the directory in path.
///
@@ -305,8 +307,9 @@ std::error_code create_directories(const Twine &path,
/// @returns errc::success if is_directory(path), otherwise a platform
/// specific error_code. If IgnoreExisting is false, also returns
/// error if the directory already existed.
-std::error_code create_directory(const Twine &path, bool IgnoreExisting = true,
- perms Perms = owner_all | group_all);
+LLVM_ABI std::error_code create_directory(const Twine &path,
+ bool IgnoreExisting = true,
+ perms Perms = owner_all | group_all);
/// Create a link from \a from to \a to.
///
@@ -319,7 +322,7 @@ std::error_code create_directory(const Twine &path, bool IgnoreExisting = true,
/// @param from The path to hard link from. This is created.
/// @returns errc::success if the link was created, otherwise a platform
/// specific error_code.
-std::error_code create_link(const Twine &to, const Twine &from);
+LLVM_ABI std::error_code create_link(const Twine &to, const Twine &from);
/// Create a hard link from \a from to \a to, or return an error.
///
@@ -327,7 +330,7 @@ std::error_code create_link(const Twine &to, const Twine &from);
/// @param from The path to hard link from. This is created.
/// @returns errc::success if the link was created, otherwise a platform
/// specific error_code.
-std::error_code create_hard_link(const Twine &to, const Twine &from);
+LLVM_ABI std::error_code create_hard_link(const Twine &to, const Twine &from);
/// Collapse all . and .. patterns, resolve all symlinks, and optionally
/// expand ~ expressions to the user's home directory.
@@ -336,28 +339,29 @@ std::error_code create_hard_link(const Twine &to, const Twine &from);
/// @param output The location to store the resolved path.
/// @param expand_tilde If true, resolves ~ expressions to the user's home
/// directory.
-std::error_code real_path(const Twine &path, SmallVectorImpl<char> &output,
- bool expand_tilde = false);
+LLVM_ABI std::error_code real_path(const Twine &path,
+ SmallVectorImpl<char> &output,
+ bool expand_tilde = false);
/// Expands ~ expressions to the user's home directory. On Unix ~user
/// directories are resolved as well.
///
/// @param path The path to resolve.
-void expand_tilde(const Twine &path, SmallVectorImpl<char> &output);
+LLVM_ABI void expand_tilde(const Twine &path, SmallVectorImpl<char> &output);
/// Get the current path.
///
/// @param result Holds the current path on return.
/// @returns errc::success if the current path has been stored in result,
/// otherwise a platform-specific error_code.
-std::error_code current_path(SmallVectorImpl<char> &result);
+LLVM_ABI std::error_code current_path(SmallVectorImpl<char> &result);
/// Set the current path.
///
/// @param path The path to set.
/// @returns errc::success if the current path was successfully set,
/// otherwise a platform-specific error_code.
-std::error_code set_current_path(const Twine &path);
+LLVM_ABI std::error_code set_current_path(const Twine &path);
/// Remove path. Equivalent to POSIX remove().
///
@@ -365,14 +369,16 @@ std::error_code set_current_path(const Twine &path);
/// @returns errc::success if path has been removed or didn't exist, otherwise a
/// platform-specific error code. If IgnoreNonExisting is false, also
/// returns error if the file didn't exist.
-std::error_code remove(const Twine &path, bool IgnoreNonExisting = true);
+LLVM_ABI std::error_code remove(const Twine &path,
+ bool IgnoreNonExisting = true);
/// Recursively delete a directory.
///
/// @param path Input path.
/// @returns errc::success if path has been removed or didn't exist, otherwise a
/// platform-specific error code.
-std::error_code remove_directories(const Twine &path, bool IgnoreErrors = true);
+LLVM_ABI std::error_code remove_directories(const Twine &path,
+ bool IgnoreErrors = true);
/// Rename \a from to \a to.
///
@@ -382,19 +388,19 @@ std::error_code remove_directories(const Twine &path, bool IgnoreErrors = true);
///
/// @param from The path to rename from.
/// @param to The path to rename to. This is created.
-std::error_code rename(const Twine &from, const Twine &to);
+LLVM_ABI std::error_code rename(const Twine &from, const Twine &to);
/// Copy the contents of \a From to \a To.
///
/// @param From The path to copy from.
/// @param To The path to copy to. This is created.
-std::error_code copy_file(const Twine &From, const Twine &To);
+LLVM_ABI std::error_code copy_file(const Twine &From, const Twine &To);
/// Copy the contents of \a From to \a To.
///
/// @param From The path to copy from.
/// @param ToFD The open file descriptor of the destination file.
-std::error_code copy_file(const Twine &From, int ToFD);
+LLVM_ABI std::error_code copy_file(const Twine &From, int ToFD);
/// Resize path to size. File is resized as if by POSIX truncate().
///
@@ -402,7 +408,7 @@ std::error_code copy_file(const Twine &From, int ToFD);
/// @param Size Size to resize to.
/// @returns errc::success if \a path has been resized to \a size, otherwise a
/// platform-specific error_code.
-std::error_code resize_file(int FD, uint64_t Size);
+LLVM_ABI std::error_code resize_file(int FD, uint64_t Size);
/// Resize \p FD to \p Size before mapping \a mapped_file_region::readwrite. On
/// non-Windows, this calls \a resize_file(). On Windows, this is a no-op,
@@ -424,10 +430,10 @@ inline std::error_code resize_file_before_mapping_readwrite(int FD,
/// @param FD Input file descriptor.
/// @returns An MD5Result with the hash computed, if successful, otherwise a
/// std::error_code.
-ErrorOr<MD5::MD5Result> md5_contents(int FD);
+LLVM_ABI ErrorOr<MD5::MD5Result> md5_contents(int FD);
/// Version of compute_md5 that doesn't require an open file descriptor.
-ErrorOr<MD5::MD5Result> md5_contents(const Twine &Path);
+LLVM_ABI ErrorOr<MD5::MD5Result> md5_contents(const Twine &Path);
/// @}
/// @name Physical Observers
@@ -438,7 +444,7 @@ ErrorOr<MD5::MD5Result> md5_contents(const Twine &Path);
/// @param status A basic_file_status previously returned from stat.
/// @returns True if the file represented by status exists, false if it does
/// not.
-bool exists(const basic_file_status &status);
+LLVM_ABI bool exists(const basic_file_status &status);
enum class AccessMode { Exist, Write, Execute };
@@ -447,7 +453,7 @@ enum class AccessMode { Exist, Write, Execute };
/// @param Path Input path.
/// @returns errc::success if the path can be accessed, otherwise a
/// platform-specific error_code.
-std::error_code access(const Twine &Path, AccessMode Mode);
+LLVM_ABI std::error_code access(const Twine &Path, AccessMode Mode);
/// Does file exist?
///
@@ -461,7 +467,7 @@ inline bool exists(const Twine &Path) {
///
/// @param Path Input path.
/// @returns True if we can execute it, false otherwise.
-bool can_execute(const Twine &Path);
+LLVM_ABI bool can_execute(const Twine &Path);
/// Can we write this file?
///
@@ -480,7 +486,7 @@ inline bool can_write(const Twine &Path) {
///
/// @returns True if A and B both represent the same file system entity, false
/// otherwise.
-bool equivalent(file_status A, file_status B);
+LLVM_ABI bool equivalent(file_status A, file_status B);
/// Do paths represent the same thing?
///
@@ -492,7 +498,8 @@ bool equivalent(file_status A, file_status B);
/// inode (or equivalent).
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code equivalent(const Twine &A, const Twine &B, bool &result);
+LLVM_ABI std::error_code equivalent(const Twine &A, const Twine &B,
+ bool &result);
/// Simpler version of equivalent for clients that don't need to
/// differentiate between an error and false.
@@ -508,10 +515,10 @@ inline bool equivalent(const Twine &A, const Twine &B) {
/// false if it is not.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
-std::error_code is_local(const Twine &path, bool &result);
+LLVM_ABI std::error_code is_local(const Twine &path, bool &result);
/// Version of is_local accepting an open file descriptor.
-std::error_code is_local(int FD, bool &result);
+LLVM_ABI std::error_code is_local(int FD, bool &result);
/// Simpler version of is_local for clients that don't need to
/// differentiate between an error and false.
@@ -533,13 +540,13 @@ inline bool is_local(int FD) {
/// @param Follow For symbolic links, indicates whether to return the file type
/// of the link itself, or of the target.
/// @returns A value from the file_type enumeration indicating the type of file.
-file_type get_file_type(const Twine &Path, bool Follow = true);
+LLVM_ABI file_type get_file_type(const Twine &Path, bool Follow = true);
/// Does status represent a directory?
///
/// @param status A basic_file_status previously returned from status.
/// @returns status.type() == file_type::directory_file.
-bool is_directory(const basic_file_status &status);
+LLVM_ABI bool is_directory(const basic_file_status &status);
/// Is path a directory?
///
@@ -548,7 +555,7 @@ bool is_directory(const basic_file_status &status);
/// symlinks, false if it is not. Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code is_directory(const Twine &path, bool &result);
+LLVM_ABI std::error_code is_directory(const Twine &path, bool &result);
/// Simpler version of is_directory for clients that don't need to
/// differentiate between an error and false.
@@ -561,7 +568,7 @@ inline bool is_directory(const Twine &Path) {
///
/// @param status A basic_file_status previously returned from status.
/// @returns status_known(status) && status.type() == file_type::regular_file.
-bool is_regular_file(const basic_file_status &status);
+LLVM_ABI bool is_regular_file(const basic_file_status &status);
/// Is path a regular file?
///
@@ -570,7 +577,7 @@ bool is_regular_file(const basic_file_status &status);
/// symlinks), false if it is not. Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code is_regular_file(const Twine &path, bool &result);
+LLVM_ABI std::error_code is_regular_file(const Twine &path, bool &result);
/// Simpler version of is_regular_file for clients that don't need to
/// differentiate between an error and false.
@@ -585,7 +592,7 @@ inline bool is_regular_file(const Twine &Path) {
///
/// @param status A basic_file_status previously returned from status.
/// @returns status_known(status) && status.type() == file_type::symlink_file.
-bool is_symlink_file(const basic_file_status &status);
+LLVM_ABI bool is_symlink_file(const basic_file_status &status);
/// Is path a symlink file?
///
@@ -594,7 +601,7 @@ bool is_symlink_file(const basic_file_status &status);
/// Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code is_symlink_file(const Twine &path, bool &result);
+LLVM_ABI std::error_code is_symlink_file(const Twine &path, bool &result);
/// Simpler version of is_symlink_file for clients that don't need to
/// differentiate between an error and false.
@@ -610,7 +617,7 @@ inline bool is_symlink_file(const Twine &Path) {
///
/// @param status A basic_file_status previously returned from status.
/// @returns exists(s) && !is_regular_file(s) && !is_directory(s)
-bool is_other(const basic_file_status &status);
+LLVM_ABI bool is_other(const basic_file_status &status);
/// Is path something that exists but is not a directory,
/// regular file, or symlink?
@@ -620,7 +627,7 @@ bool is_other(const basic_file_status &status);
/// file, or a symlink, false if it does not. Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code is_other(const Twine &path, bool &result);
+LLVM_ABI std::error_code is_other(const Twine &path, bool &result);
/// Get file status as if by POSIX stat().
///
@@ -630,15 +637,15 @@ std::error_code is_other(const Twine &path, bool &result);
/// statted.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code status(const Twine &path, file_status &result,
- bool follow = true);
+LLVM_ABI std::error_code status(const Twine &path, file_status &result,
+ bool follow = true);
/// A version for when a file descriptor is already available.
-std::error_code status(int FD, file_status &Result);
+LLVM_ABI std::error_code status(int FD, file_status &Result);
#ifdef _WIN32
/// A version for when a file descriptor is already available.
-std::error_code status(file_t FD, file_status &Result);
+LLVM_ABI std::error_code status(file_t FD, file_status &Result);
#endif
/// Get file creation mode mask of the process.
@@ -647,7 +654,7 @@ std::error_code status(file_t FD, file_status &Result);
/// @note There is no umask on Windows. This function returns 0 always
/// on Windows. This function does not return an error_code because
/// umask(2) never fails. It is not thread safe.
-unsigned getUmask();
+LLVM_ABI unsigned getUmask();
/// Set file permissions.
///
@@ -658,12 +665,12 @@ unsigned getUmask();
/// @note On Windows, all permissions except *_write are ignored. Using any of
/// owner_write, group_write, or all_write will make the file writable.
/// Otherwise, the file will be marked as read-only.
-std::error_code setPermissions(const Twine &Path, perms Permissions);
+LLVM_ABI std::error_code setPermissions(const Twine &Path, perms Permissions);
/// Vesion of setPermissions accepting a file descriptor.
/// TODO Delete the path based overload once we implement the FD based overload
/// on Windows.
-std::error_code setPermissions(int FD, perms Permissions);
+LLVM_ABI std::error_code setPermissions(int FD, perms Permissions);
/// Get file permissions.
///
@@ -673,7 +680,7 @@ std::error_code setPermissions(int FD, perms Permissions);
/// @note On Windows, if the file does not have the FILE_ATTRIBUTE_READONLY
/// attribute, all_all will be returned. Otherwise, all_read | all_exe
/// will be returned.
-ErrorOr<perms> getPermissions(const Twine &Path);
+LLVM_ABI ErrorOr<perms> getPermissions(const Twine &Path);
/// Get file size.
///
@@ -695,8 +702,9 @@ inline std::error_code file_size(const Twine &Path, uint64_t &Result) {
/// @returns errc::success if the file times were successfully set, otherwise a
/// platform-specific error_code or errc::function_not_supported on
/// platforms where the functionality isn't available.
-std::error_code setLastAccessAndModificationTime(int FD, TimePoint<> AccessTime,
- TimePoint<> ModificationTime);
+LLVM_ABI std::error_code
+setLastAccessAndModificationTime(int FD, TimePoint<> AccessTime,
+ TimePoint<> ModificationTime);
/// Simpler version that sets both file modification and access time to the same
/// time.
@@ -709,7 +717,7 @@ inline std::error_code setLastAccessAndModificationTime(int FD,
///
/// @param s Input file status.
/// @returns True if status() != status_error.
-bool status_known(const basic_file_status &s);
+LLVM_ABI bool status_known(const basic_file_status &s);
/// Is status available?
///
@@ -717,7 +725,7 @@ bool status_known(const basic_file_status &s);
/// @param result Set to true if status() != status_error.
/// @returns errc::success if result has been successfully set, otherwise a
/// platform-specific error_code.
-std::error_code status_known(const Twine &path, bool &result);
+LLVM_ABI std::error_code status_known(const Twine &path, bool &result);
enum CreationDisposition : unsigned {
/// CD_CreateAlways - When opening a file:
@@ -796,8 +804,9 @@ enum OpenFlags : unsigned {
/// @param Model Name to base unique path off of.
/// @param ResultPath Set to the file's path.
/// @param MakeAbsolute Whether to use the system temp directory.
-void createUniquePath(const Twine &Model, SmallVectorImpl<char> &ResultPath,
- bool MakeAbsolute);
+LLVM_ABI void createUniquePath(const Twine &Model,
+ SmallVectorImpl<char> &ResultPath,
+ bool MakeAbsolute);
/// Create a uniquely named file.
///
@@ -822,16 +831,16 @@ void createUniquePath(const Twine &Model, SmallVectorImpl<char> &ResultPath,
/// @param Mode Set to the opened file's permissions.
/// @returns errc::success if Result{FD,Path} have been successfully set,
/// otherwise a platform-specific error_code.
-std::error_code createUniqueFile(const Twine &Model, int &ResultFD,
- SmallVectorImpl<char> &ResultPath,
- OpenFlags Flags = OF_None,
- unsigned Mode = all_read | all_write);
+LLVM_ABI std::error_code createUniqueFile(const Twine &Model, int &ResultFD,
+ SmallVectorImpl<char> &ResultPath,
+ OpenFlags Flags = OF_None,
+ unsigned Mode = all_read | all_write);
/// Simpler version for clients that don't want an open file. An empty
/// file will still be created.
-std::error_code createUniqueFile(const Twine &Model,
- SmallVectorImpl<char> &ResultPath,
- unsigned Mode = all_read | all_write);
+LLVM_ABI std::error_code createUniqueFile(const Twine &Model,
+ SmallVectorImpl<char> &ResultPath,
+ unsigned Mode = all_read | all_write);
/// Represents a temporary file.
///
@@ -842,16 +851,16 @@ std::error_code createUniqueFile(const Twine &Model,
/// properly handle errors in a destructor.
class TempFile {
bool Done = false;
- TempFile(StringRef Name, int FD);
+ LLVM_ABI TempFile(StringRef Name, int FD);
public:
/// This creates a temporary file with createUniqueFile and schedules it for
/// deletion with sys::RemoveFileOnSignal.
- static Expected<TempFile> create(const Twine &Model,
- unsigned Mode = all_read | all_write,
- OpenFlags ExtraFlags = OF_None);
- TempFile(TempFile &&Other);
- TempFile &operator=(TempFile &&Other);
+ LLVM_ABI static Expected<TempFile>
+ create(const Twine &Model, unsigned Mode = all_read | all_write,
+ OpenFlags ExtraFlags = OF_None);
+ LLVM_ABI TempFile(TempFile &&Other);
+ LLVM_ABI TempFile &operator=(TempFile &&Other);
// Name of the temporary file.
std::string TmpName;
@@ -865,16 +874,16 @@ public:
#endif
// Keep this with the given name.
- Error keep(const Twine &Name);
+ LLVM_ABI Error keep(const Twine &Name);
// Keep this with the temporary name.
- Error keep();
+ LLVM_ABI Error keep();
// Delete the file.
- Error discard();
+ LLVM_ABI Error discard();
// This checks that keep or delete was called.
- ~TempFile();
+ LLVM_ABI ~TempFile();
};
/// Create a file in the system temporary directory.
@@ -885,19 +894,20 @@ public:
///
/// This should be used for things like a temporary .s that is removed after
/// running the assembler.
-std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
- int &ResultFD,
- SmallVectorImpl<char> &ResultPath,
- OpenFlags Flags = OF_None);
+LLVM_ABI std::error_code createTemporaryFile(const Twine &Prefix,
+ StringRef Suffix, int &ResultFD,
+ SmallVectorImpl<char> &ResultPath,
+ OpenFlags Flags = OF_None);
/// Simpler version for clients that don't want an open file. An empty
/// file will still be created.
-std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
- SmallVectorImpl<char> &ResultPath,
- OpenFlags Flags = OF_None);
+LLVM_ABI std::error_code createTemporaryFile(const Twine &Prefix,
+ StringRef Suffix,
+ SmallVectorImpl<char> &ResultPath,
+ OpenFlags Flags = OF_None);
-std::error_code createUniqueDirectory(const Twine &Prefix,
- SmallVectorImpl<char> &ResultPath);
+LLVM_ABI std::error_code
+createUniqueDirectory(const Twine &Prefix, SmallVectorImpl<char> &ResultPath);
/// Get a unique name, not currently exisiting in the filesystem. Subject
/// to race conditions, prefer to use createUniqueFile instead.
@@ -906,8 +916,9 @@ std::error_code createUniqueDirectory(const Twine &Prefix,
/// checks if it exists. This function is subject to race conditions, if you
/// want to use the returned name to actually create a file, use
/// createUniqueFile instead.
-std::error_code getPotentiallyUniqueFileName(const Twine &Model,
- SmallVectorImpl<char> &ResultPath);
+LLVM_ABI std::error_code
+getPotentiallyUniqueFileName(const Twine &Model,
+ SmallVectorImpl<char> &ResultPath);
/// Get a unique temporary file name, not currently exisiting in the
/// filesystem. Subject to race conditions, prefer to use createTemporaryFile
@@ -917,7 +928,7 @@ std::error_code getPotentiallyUniqueFileName(const Twine &Model,
/// checks if it exists. This function is subject to race conditions, if you
/// want to use the returned name to actually create a file, use
/// createTemporaryFile instead.
-std::error_code
+LLVM_ABI std::error_code
getPotentiallyUniqueTempFileName(const Twine &Prefix, StringRef Suffix,
SmallVectorImpl<char> &ResultPath);
@@ -955,9 +966,9 @@ inline FileAccess &operator|=(FileAccess &A, FileAccess B) {
/// @param Mode The access permissions of the file, represented in octal.
/// @returns errc::success if \a Name has been opened, otherwise a
/// platform-specific error_code.
-std::error_code openFile(const Twine &Name, int &ResultFD,
- CreationDisposition Disp, FileAccess Access,
- OpenFlags Flags, unsigned Mode = 0666);
+LLVM_ABI std::error_code openFile(const Twine &Name, int &ResultFD,
+ CreationDisposition Disp, FileAccess Access,
+ OpenFlags Flags, unsigned Mode = 0666);
/// @brief Opens a file with the specified creation disposition, access mode,
/// and flags and returns a platform-specific file object.
@@ -973,14 +984,15 @@ std::error_code openFile(const Twine &Name, int &ResultFD,
/// @param Mode The access permissions of the file, represented in octal.
/// @returns errc::success if \a Name has been opened, otherwise a
/// platform-specific error_code.
-Expected<file_t> openNativeFile(const Twine &Name, CreationDisposition Disp,
- FileAccess Access, OpenFlags Flags,
- unsigned Mode = 0666);
+LLVM_ABI Expected<file_t> openNativeFile(const Twine &Name,
+ CreationDisposition Disp,
+ FileAccess Access, OpenFlags Flags,
+ unsigned Mode = 0666);
/// Converts from a Posix file descriptor number to a native file handle.
/// On Windows, this retreives the underlying handle. On non-Windows, this is a
/// no-op.
-file_t convertFDToNativeFile(int FD);
+LLVM_ABI file_t convertFDToNativeFile(int FD);
#ifndef _WIN32
inline file_t convertFDToNativeFile(int FD) { return FD; }
@@ -988,15 +1000,15 @@ inline file_t convertFDToNativeFile(int FD) { return FD; }
/// Return an open handle to standard in. On Unix, this is typically FD 0.
/// Returns kInvalidFile when the stream is closed.
-file_t getStdinHandle();
+LLVM_ABI file_t getStdinHandle();
/// Return an open handle to standard out. On Unix, this is typically FD 1.
/// Returns kInvalidFile when the stream is closed.
-file_t getStdoutHandle();
+LLVM_ABI file_t getStdoutHandle();
/// Return an open handle to standard error. On Unix, this is typically FD 2.
/// Returns kInvalidFile when the stream is closed.
-file_t getStderrHandle();
+LLVM_ABI file_t getStderrHandle();
/// Reads \p Buf.size() bytes from \p FileHandle into \p Buf. Returns the number
/// of bytes actually read. On Unix, this is equivalent to `return ::read(FD,
@@ -1005,7 +1017,8 @@ file_t getStderrHandle();
/// @param FileHandle File to read from.
/// @param Buf Buffer to read into.
/// @returns The number of bytes read, or error.
-Expected<size_t> readNativeFile(file_t FileHandle, MutableArrayRef<char> Buf);
+LLVM_ABI Expected<size_t> readNativeFile(file_t FileHandle,
+ MutableArrayRef<char> Buf);
/// Default chunk size for \a readNativeFileToEOF().
enum : size_t { DefaultReadChunkSize = 4 * 4096 };
@@ -1023,8 +1036,9 @@ enum : size_t { DefaultReadChunkSize = 4 * 4096 };
/// \param Buffer Where to put the file content.
/// \param ChunkSize Size of chunks.
/// \returns The error if EOF was not found.
-Error readNativeFileToEOF(file_t FileHandle, SmallVectorImpl<char> &Buffer,
- ssize_t ChunkSize = DefaultReadChunkSize);
+LLVM_ABI Error readNativeFileToEOF(file_t FileHandle,
+ SmallVectorImpl<char> &Buffer,
+ ssize_t ChunkSize = DefaultReadChunkSize);
/// Reads \p Buf.size() bytes from \p FileHandle at offset \p Offset into \p
/// Buf. If 'pread' is available, this will use that, otherwise it will use
@@ -1035,9 +1049,9 @@ Error readNativeFileToEOF(file_t FileHandle, SmallVectorImpl<char> &Buffer,
/// @param Buf Buffer to read into.
/// @param Offset Offset into the file at which the read should occur.
/// @returns The number of bytes read, or error.
-Expected<size_t> readNativeFileSlice(file_t FileHandle,
- MutableArrayRef<char> Buf,
- uint64_t Offset);
+LLVM_ABI Expected<size_t> readNativeFileSlice(file_t FileHandle,
+ MutableArrayRef<char> Buf,
+ uint64_t Offset);
/// @brief Opens the file with the given name in a write-only or read-write
/// mode, returning its open file descriptor. If the file does not exist, it
@@ -1137,9 +1151,9 @@ inline Expected<file_t> openNativeFileForReadWrite(const Twine &Name,
/// location.
/// @returns errc::success if \a Name has been opened, otherwise a
/// platform-specific error_code.
-std::error_code openFileForRead(const Twine &Name, int &ResultFD,
- OpenFlags Flags = OF_None,
- SmallVectorImpl<char> *RealPath = nullptr);
+LLVM_ABI std::error_code
+openFileForRead(const Twine &Name, int &ResultFD, OpenFlags Flags = OF_None,
+ SmallVectorImpl<char> *RealPath = nullptr);
/// @brief Opens the file with the given name in a read-only mode, returning
/// its open file descriptor.
@@ -1153,7 +1167,7 @@ std::error_code openFileForRead(const Twine &Name, int &ResultFD,
/// location.
/// @returns a platform-specific file descriptor if \a Name has been opened,
/// otherwise an error object.
-Expected<file_t>
+LLVM_ABI Expected<file_t>
openNativeFileForRead(const Twine &Name, OpenFlags Flags = OF_None,
SmallVectorImpl<char> *RealPath = nullptr);
@@ -1178,21 +1192,21 @@ openNativeFileForRead(const Twine &Name, OpenFlags Flags = OF_None,
/// context, as it may not prevent other threads in the same process from
/// obtaining a lock on the same file, even if they are using a different file
/// descriptor.
-std::error_code
+LLVM_ABI std::error_code
tryLockFile(int FD,
std::chrono::milliseconds Timeout = std::chrono::milliseconds(0));
/// Lock the file.
///
/// This function acts as @ref tryLockFile but it waits infinitely.
-std::error_code lockFile(int FD);
+LLVM_ABI std::error_code lockFile(int FD);
/// Unlock the file.
///
/// @param FD The descriptor representing the file to unlock.
/// @returns errc::success if lock is successfully released or platform-specific
/// error_code otherwise.
-std::error_code unlockFile(int FD);
+LLVM_ABI std::error_code unlockFile(int FD);
/// @brief Close the file object. This should be used instead of ::close for
/// portability. On error, the caller should assume the file is closed, as is
@@ -1203,7 +1217,7 @@ std::error_code unlockFile(int FD);
///
/// @returns An error code if closing the file failed. Typically, an error here
/// means that the filesystem may have failed to perform some buffered writes.
-std::error_code closeFile(file_t &F);
+LLVM_ABI std::error_code closeFile(file_t &F);
#ifdef LLVM_ON_UNIX
/// @brief Change ownership of a file.
@@ -1212,7 +1226,8 @@ std::error_code closeFile(file_t &F);
/// @param Group The group of the file to change to.
/// @returns errc::success if successfully updated file ownership, otherwise an
/// error code is returned.
-std::error_code changeFileOwnership(int FD, uint32_t Owner, uint32_t Group);
+LLVM_ABI std::error_code changeFileOwnership(int FD, uint32_t Owner,
+ uint32_t Group);
#endif
/// RAII class that facilitates file locking.
@@ -1244,7 +1259,7 @@ public:
}
};
-std::error_code getUniqueID(const Twine Path, UniqueID &Result);
+LLVM_ABI std::error_code getUniqueID(const Twine Path, UniqueID &Result);
/// Get disk space usage information.
///
@@ -1255,7 +1270,7 @@ std::error_code getUniqueID(const Twine Path, UniqueID &Result);
/// @returns a space_info structure filled with the capacity, free, and
/// available space on the device \a Path is on. A platform specific error_code
/// is returned on error.
-ErrorOr<space_info> disk_space(const Twine &Path);
+LLVM_ABI ErrorOr<space_info> disk_space(const Twine &Path);
/// This class represents a memory mapped file. It is based on
/// boost::iostreams::mapped_file.
@@ -1290,10 +1305,11 @@ private:
Moved.copyFrom(mapped_file_region());
}
- void unmapImpl();
- void dontNeedImpl();
+ LLVM_ABI void unmapImpl();
+ LLVM_ABI void dontNeedImpl();
- std::error_code init(sys::fs::file_t FD, uint64_t Offset, mapmode Mode);
+ LLVM_ABI std::error_code init(sys::fs::file_t FD, uint64_t Offset,
+ mapmode Mode);
public:
mapped_file_region() = default;
@@ -1308,8 +1324,8 @@ public:
mapped_file_region &operator=(const mapped_file_region &) = delete;
/// \param fd An open file descriptor to map. Does not take ownership of fd.
- mapped_file_region(sys::fs::file_t fd, mapmode mode, size_t length, uint64_t offset,
- std::error_code &ec);
+ LLVM_ABI mapped_file_region(sys::fs::file_t fd, mapmode mode, size_t length,
+ uint64_t offset, std::error_code &ec);
~mapped_file_region() { unmapImpl(); }
@@ -1323,21 +1339,21 @@ public:
}
void dontNeed() { dontNeedImpl(); }
- size_t size() const;
- char *data() const;
+ LLVM_ABI size_t size() const;
+ LLVM_ABI char *data() const;
/// Get a const view of the data. Modifying this memory has undefined
/// behavior.
- const char *const_data() const;
+ LLVM_ABI const char *const_data() const;
/// \returns The minimum alignment offset must be.
- static int alignment();
+ LLVM_ABI static int alignment();
};
/// Return the path to the main executable, given the value of argv[0] from
/// program startup and the address of main itself. In extremis, this function
/// may fail and return an empty path.
-std::string getMainExecutable(const char *argv0, void *MainExecAddr);
+LLVM_ABI std::string getMainExecutable(const char *argv0, void *MainExecAddr);
/// @}
/// @name Iterators
@@ -1364,14 +1380,15 @@ public:
directory_entry() = default;
- void replace_filename(const Twine &Filename, file_type Type,
- basic_file_status Status = basic_file_status());
+ LLVM_ABI void
+ replace_filename(const Twine &Filename, file_type Type,
+ basic_file_status Status = basic_file_status());
const std::string &path() const { return Path; }
// Get basic information about entry file (a subset of fs::status()).
// On most platforms this is a stat() call.
// On windows the information was already retrieved from the directory.
- ErrorOr<basic_file_status> status() const;
+ LLVM_ABI ErrorOr<basic_file_status> status() const;
// Get the type of this file.
// On most platforms (Linux/Mac/Windows/BSD), this was already retrieved.
// On some platforms (e.g. Solaris) this is a stat() call.
@@ -1384,19 +1401,20 @@ public:
bool operator==(const directory_entry& RHS) const { return Path == RHS.Path; }
bool operator!=(const directory_entry& RHS) const { return !(*this == RHS); }
- bool operator< (const directory_entry& RHS) const;
- bool operator<=(const directory_entry& RHS) const;
- bool operator> (const directory_entry& RHS) const;
- bool operator>=(const directory_entry& RHS) const;
+ LLVM_ABI bool operator<(const directory_entry &RHS) const;
+ LLVM_ABI bool operator<=(const directory_entry &RHS) const;
+ LLVM_ABI bool operator>(const directory_entry &RHS) const;
+ LLVM_ABI bool operator>=(const directory_entry &RHS) const;
};
namespace detail {
struct DirIterState;
- std::error_code directory_iterator_construct(DirIterState &, StringRef, bool);
- std::error_code directory_iterator_increment(DirIterState &);
- std::error_code directory_iterator_destruct(DirIterState &);
+ LLVM_ABI std::error_code directory_iterator_construct(DirIterState &,
+ StringRef, bool);
+ LLVM_ABI std::error_code directory_iterator_increment(DirIterState &);
+ LLVM_ABI std::error_code directory_iterator_destruct(DirIterState &);
/// Keeps state for the directory_iterator.
struct DirIterState {
diff --git a/llvm/include/llvm/Support/FileUtilities.h b/llvm/include/llvm/Support/FileUtilities.h
index c5a8457..9421518 100644
--- a/llvm/include/llvm/Support/FileUtilities.h
+++ b/llvm/include/llvm/Support/FileUtilities.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_FILEUTILITIES_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
@@ -29,8 +30,9 @@ namespace llvm {
/// option, it will set the string to an error message if an error occurs, or
/// if the files are different.
///
-int DiffFilesWithTolerance(StringRef FileA, StringRef FileB, double AbsTol,
- double RelTol, std::string *Error = nullptr);
+LLVM_ABI int DiffFilesWithTolerance(StringRef FileA, StringRef FileB,
+ double AbsTol, double RelTol,
+ std::string *Error = nullptr);
/// FileRemover - This class is a simple object meant to be stack allocated.
/// If an exception is thrown from a region, the object removes the filename
@@ -79,12 +81,13 @@ public:
/// permissions and dates to the output file.
class FilePermissionsApplier {
public:
- static Expected<FilePermissionsApplier> create(StringRef InputFilename);
+ LLVM_ABI static Expected<FilePermissionsApplier>
+ create(StringRef InputFilename);
/// Apply stored permissions to the \p OutputFilename.
/// Copy LastAccess and ModificationTime if \p CopyDates is true.
/// Overwrite stored permissions if \p OverwritePermissions is specified.
- Error
+ LLVM_ABI Error
apply(StringRef OutputFilename, bool CopyDates = false,
std::optional<sys::fs::perms> OverwritePermissions = std::nullopt);
diff --git a/llvm/include/llvm/Support/Format.h b/llvm/include/llvm/Support/Format.h
index 89b6ae3..2553002 100644
--- a/llvm/include/llvm/Support/Format.h
+++ b/llvm/include/llvm/Support/Format.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstdio>
@@ -36,7 +37,7 @@ namespace llvm {
/// This is a helper class used for handling formatted output. It is the
/// abstract base class of a templated derived class.
-class format_object_base {
+class LLVM_ABI format_object_base {
protected:
const char *Fmt;
~format_object_base() = default; // Disallow polymorphic deletion.
diff --git a/llvm/include/llvm/Support/FormatVariadic.h b/llvm/include/llvm/Support/FormatVariadic.h
index d0e647e..8565292 100644
--- a/llvm/include/llvm/Support/FormatVariadic.h
+++ b/llvm/include/llvm/Support/FormatVariadic.h
@@ -30,6 +30,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/FormatCommon.h"
#include "llvm/Support/FormatProviders.h"
#include "llvm/Support/FormatVariadicDetails.h"
@@ -97,7 +98,7 @@ public:
}
// Parse and optionally validate format string (in debug builds).
- static SmallVector<ReplacementItem, 2>
+ LLVM_ABI static SmallVector<ReplacementItem, 2>
parseFormatString(StringRef Fmt, size_t NumArgs, bool Validate);
std::string str() const {
diff --git a/llvm/include/llvm/Support/FormatVariadicDetails.h b/llvm/include/llvm/Support/FormatVariadicDetails.h
index a221fca..b85a4f6 100644
--- a/llvm/include/llvm/Support/FormatVariadicDetails.h
+++ b/llvm/include/llvm/Support/FormatVariadicDetails.h
@@ -9,8 +9,9 @@
#ifndef LLVM_SUPPORT_FORMATVARIADICDETAILS_H
#define LLVM_SUPPORT_FORMATVARIADICDETAILS_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>
@@ -21,7 +22,7 @@ class Error;
namespace support {
namespace detail {
-class format_adapter {
+class LLVM_ABI format_adapter {
virtual void anchor();
protected:
diff --git a/llvm/include/llvm/Support/FormattedStream.h b/llvm/include/llvm/Support/FormattedStream.h
index 850a18d..011a6ae 100644
--- a/llvm/include/llvm/Support/FormattedStream.h
+++ b/llvm/include/llvm/Support/FormattedStream.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_FORMATTEDSTREAM_H
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
@@ -27,7 +28,7 @@ namespace llvm {
/// doesn't attempt to handle everything Unicode can do (combining characters,
/// right-to-left markers, etc), but should cover the cases likely to appear in
/// source code or diagnostic messages.
-class formatted_raw_ostream : public raw_ostream {
+class LLVM_ABI formatted_raw_ostream : public raw_ostream {
/// TheStream - The real stream we output to. We set it to be
/// unbuffered, since we're already doing our own buffering.
///
@@ -206,15 +207,15 @@ private:
/// fouts() - This returns a reference to a formatted_raw_ostream for
/// standard output. Use it like: fouts() << "foo" << "bar";
-formatted_raw_ostream &fouts();
+LLVM_ABI formatted_raw_ostream &fouts();
/// ferrs() - This returns a reference to a formatted_raw_ostream for
/// standard error. Use it like: ferrs() << "foo" << "bar";
-formatted_raw_ostream &ferrs();
+LLVM_ABI formatted_raw_ostream &ferrs();
/// fdbgs() - This returns a reference to a formatted_raw_ostream for
/// debug output. Use it like: fdbgs() << "foo" << "bar";
-formatted_raw_ostream &fdbgs();
+LLVM_ABI formatted_raw_ostream &fdbgs();
} // end llvm namespace
diff --git a/llvm/include/llvm/Support/GlobPattern.h b/llvm/include/llvm/Support/GlobPattern.h
index 5e5e5c8..62ed4a0 100644
--- a/llvm/include/llvm/Support/GlobPattern.h
+++ b/llvm/include/llvm/Support/GlobPattern.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include <optional>
@@ -54,10 +55,10 @@ public:
/// \param MaxSubPatterns if provided limit the number of allowed subpatterns
/// created from expanding braces otherwise disable
/// brace expansion
- static Expected<GlobPattern>
+ LLVM_ABI static Expected<GlobPattern>
create(StringRef Pat, std::optional<size_t> MaxSubPatterns = {});
/// \returns \p true if \p S matches this glob pattern
- bool match(StringRef S) const;
+ LLVM_ABI bool match(StringRef S) const;
// Returns true for glob pattern "*". Can be used to avoid expensive
// preparation/acquisition of the input for match().
@@ -74,9 +75,9 @@ private:
struct SubGlobPattern {
/// \param Pat the pattern to match against
- static Expected<SubGlobPattern> create(StringRef Pat);
+ LLVM_ABI static Expected<SubGlobPattern> create(StringRef Pat);
/// \returns \p true if \p S matches this glob pattern
- bool match(StringRef S) const;
+ LLVM_ABI bool match(StringRef S) const;
StringRef getPat() const { return StringRef(Pat.data(), Pat.size()); }
// Brackets with their end position and matched bytes.
diff --git a/llvm/include/llvm/Support/GraphWriter.h b/llvm/include/llvm/Support/GraphWriter.h
index 359b608..8b619a3 100644
--- a/llvm/include/llvm/Support/GraphWriter.h
+++ b/llvm/include/llvm/Support/GraphWriter.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
@@ -37,11 +38,11 @@ namespace llvm {
namespace DOT { // Private functions...
-std::string EscapeString(const std::string &Label);
+LLVM_ABI std::string EscapeString(const std::string &Label);
/// Get a color string for this node number. Simply round-robin selects
/// from a reasonable number of colors.
-StringRef getColorString(unsigned NodeNumber);
+LLVM_ABI StringRef getColorString(unsigned NodeNumber);
} // end namespace DOT
@@ -57,8 +58,8 @@ enum Name {
} // end namespace GraphProgram
-bool DisplayGraph(StringRef Filename, bool wait = true,
- GraphProgram::Name program = GraphProgram::DOT);
+LLVM_ABI bool DisplayGraph(StringRef Filename, bool wait = true,
+ GraphProgram::Name program = GraphProgram::DOT);
template<typename GraphType>
class GraphWriter {
@@ -368,7 +369,7 @@ raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
return O;
}
-std::string createGraphFilename(const Twine &Name, int &FD);
+LLVM_ABI std::string createGraphFilename(const Twine &Name, int &FD);
/// Writes graph into a provided @c Filename.
/// If @c Filename is empty, generates a random one.
diff --git a/llvm/include/llvm/Support/HexagonAttributeParser.h b/llvm/include/llvm/Support/HexagonAttributeParser.h
index 462bfc4..beca5dc 100644
--- a/llvm/include/llvm/Support/HexagonAttributeParser.h
+++ b/llvm/include/llvm/Support/HexagonAttributeParser.h
@@ -9,11 +9,12 @@
#ifndef LLVM_SUPPORT_HEXAGONATTRIBUTEPARSER_H
#define LLVM_SUPPORT_HEXAGONATTRIBUTEPARSER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttrParserCompact.h"
#include "llvm/Support/HexagonAttributes.h"
namespace llvm {
-class HexagonAttributeParser : public ELFCompactAttrParser {
+class LLVM_ABI HexagonAttributeParser : public ELFCompactAttrParser {
struct DisplayHandler {
HexagonAttrs::AttrType Attribute;
Error (HexagonAttributeParser::*Routine)(unsigned);
diff --git a/llvm/include/llvm/Support/HexagonAttributes.h b/llvm/include/llvm/Support/HexagonAttributes.h
index 8a50d89..121daef 100644
--- a/llvm/include/llvm/Support/HexagonAttributes.h
+++ b/llvm/include/llvm/Support/HexagonAttributes.h
@@ -9,12 +9,13 @@
#ifndef LLVM_SUPPORT_HEXAGONATTRIBUTES_H
#define LLVM_SUPPORT_HEXAGONATTRIBUTES_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttributes.h"
namespace llvm {
namespace HexagonAttrs {
-const TagNameMap &getHexagonAttributeTags();
+LLVM_ABI const TagNameMap &getHexagonAttributeTags();
enum AttrType : unsigned {
ARCH = 4,
diff --git a/llvm/include/llvm/Support/InitLLVM.h b/llvm/include/llvm/Support/InitLLVM.h
index 172d13b..748f5d8 100644
--- a/llvm/include/llvm/Support/InitLLVM.h
+++ b/llvm/include/llvm/Support/InitLLVM.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include <optional>
@@ -34,13 +35,13 @@
namespace llvm {
class InitLLVM {
public:
- InitLLVM(int &Argc, const char **&Argv,
- bool InstallPipeSignalExitHandler = true);
+ LLVM_ABI InitLLVM(int &Argc, const char **&Argv,
+ bool InstallPipeSignalExitHandler = true);
InitLLVM(int &Argc, char **&Argv, bool InstallPipeSignalExitHandler = true)
: InitLLVM(Argc, const_cast<const char **&>(Argv),
InstallPipeSignalExitHandler) {}
- ~InitLLVM();
+ LLVM_ABI ~InitLLVM();
private:
BumpPtrAllocator Alloc;
diff --git a/llvm/include/llvm/Support/InstructionCost.h b/llvm/include/llvm/Support/InstructionCost.h
index d5f7457..ab1c8eb 100644
--- a/llvm/include/llvm/Support/InstructionCost.h
+++ b/llvm/include/llvm/Support/InstructionCost.h
@@ -18,8 +18,10 @@
#ifndef LLVM_SUPPORT_INSTRUCTIONCOST_H
#define LLVM_SUPPORT_INSTRUCTIONCOST_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <limits>
+#include <tuple>
namespace llvm {
@@ -191,9 +193,7 @@ public:
/// the states are valid and users can test for validity of the cost
/// explicitly.
bool operator<(const InstructionCost &RHS) const {
- if (State != RHS.State)
- return State < RHS.State;
- return Value < RHS.Value;
+ return std::tie(State, Value) < std::tie(RHS.State, RHS.Value);
}
bool operator==(const InstructionCost &RHS) const {
@@ -235,7 +235,7 @@ public:
return *this >= RHS2;
}
- void print(raw_ostream &OS) const;
+ LLVM_ABI void print(raw_ostream &OS) const;
template <class Function>
auto map(const Function &F) const -> InstructionCost {
diff --git a/llvm/include/llvm/Support/JSON.h b/llvm/include/llvm/Support/JSON.h
index 0a6f3f2..962d79d 100644
--- a/llvm/include/llvm/Support/JSON.h
+++ b/llvm/include/llvm/Support/JSON.h
@@ -82,11 +82,11 @@ constexpr bool is_uint_64_bit_v =
/// Returns true if \p S is valid UTF-8, which is required for use as JSON.
/// If it returns false, \p Offset is set to a byte offset near the first error.
-bool isUTF8(llvm::StringRef S, size_t *ErrOffset = nullptr);
+LLVM_ABI bool isUTF8(llvm::StringRef S, size_t *ErrOffset = nullptr);
/// Replaces invalid UTF-8 sequences in \p S with the replacement character
/// (U+FFFD). The returned string is valid UTF-8.
/// This is much slower than isUTF8, so test that first.
-std::string fixUTF8(llvm::StringRef S);
+LLVM_ABI std::string fixUTF8(llvm::StringRef S);
class Array;
class ObjectKey;
@@ -136,25 +136,25 @@ public:
iterator find(StringRef K) { return M.find_as(K); }
const_iterator find(StringRef K) const { return M.find_as(K); }
// operator[] acts as if Value was default-constructible as null.
- Value &operator[](const ObjectKey &K);
- Value &operator[](ObjectKey &&K);
+ LLVM_ABI Value &operator[](const ObjectKey &K);
+ LLVM_ABI Value &operator[](ObjectKey &&K);
// Look up a property, returning nullptr if it doesn't exist.
- Value *get(StringRef K);
- const Value *get(StringRef K) const;
+ LLVM_ABI Value *get(StringRef K);
+ LLVM_ABI const Value *get(StringRef K) const;
// Typed accessors return std::nullopt/nullptr if
// - the property doesn't exist
// - or it has the wrong type
- std::optional<std::nullptr_t> getNull(StringRef K) const;
- std::optional<bool> getBoolean(StringRef K) const;
- std::optional<double> getNumber(StringRef K) const;
- std::optional<int64_t> getInteger(StringRef K) const;
- std::optional<llvm::StringRef> getString(StringRef K) const;
- const json::Object *getObject(StringRef K) const;
- json::Object *getObject(StringRef K);
- const json::Array *getArray(StringRef K) const;
- json::Array *getArray(StringRef K);
+ LLVM_ABI std::optional<std::nullptr_t> getNull(StringRef K) const;
+ LLVM_ABI std::optional<bool> getBoolean(StringRef K) const;
+ LLVM_ABI std::optional<double> getNumber(StringRef K) const;
+ LLVM_ABI std::optional<int64_t> getInteger(StringRef K) const;
+ LLVM_ABI std::optional<llvm::StringRef> getString(StringRef K) const;
+ LLVM_ABI const json::Object *getObject(StringRef K) const;
+ LLVM_ABI json::Object *getObject(StringRef K);
+ LLVM_ABI const json::Array *getArray(StringRef K) const;
+ LLVM_ABI json::Array *getArray(StringRef K);
};
-bool operator==(const Object &LHS, const Object &RHS);
+LLVM_ABI bool operator==(const Object &LHS, const Object &RHS);
inline bool operator!=(const Object &LHS, const Object &RHS) {
return !(LHS == RHS);
}
@@ -170,7 +170,7 @@ public:
using const_iterator = std::vector<Value>::const_iterator;
Array() = default;
- explicit Array(std::initializer_list<Value> Elements);
+ LLVM_ABI explicit Array(std::initializer_list<Value> Elements);
template <typename Collection> explicit Array(const Collection &C) {
for (const auto &V : C)
emplace_back(V);
@@ -301,7 +301,7 @@ public:
// It would be nice to have Value() be null. But that would make {} null too.
Value(const Value &M) { copyFrom(M); }
Value(Value &&M) { moveFrom(std::move(M)); }
- Value(std::initializer_list<Value> Elements);
+ LLVM_ABI Value(std::initializer_list<Value> Elements);
Value(json::Array &&Elements) : Type(T_Array) {
create<json::Array>(std::move(Elements));
}
@@ -472,7 +472,7 @@ public:
return LLVM_LIKELY(Type == T_Array) ? &as<json::Array>() : nullptr;
}
- void print(llvm::raw_ostream &OS) const;
+ LLVM_ABI void print(llvm::raw_ostream &OS) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump() const {
print(llvm::dbgs());
@@ -481,12 +481,12 @@ public:
#endif // !NDEBUG || LLVM_ENABLE_DUMP
private:
- void destroy();
- void copyFrom(const Value &M);
+ LLVM_ABI void destroy();
+ LLVM_ABI void copyFrom(const Value &M);
// We allow moving from *const* Values, by marking all members as mutable!
// This hack is needed to support initializer-list syntax efficiently.
// (std::initializer_list<T> is a container of const T).
- void moveFrom(const Value &&M);
+ LLVM_ABI void moveFrom(const Value &&M);
friend class Array;
friend class Object;
@@ -531,10 +531,10 @@ private:
llvm::StringRef, std::string, json::Array,
json::Object>
Union;
- friend bool operator==(const Value &, const Value &);
+ LLVM_ABI_FRIEND friend bool operator==(const Value &, const Value &);
};
-bool operator==(const Value &, const Value &);
+LLVM_ABI bool operator==(const Value &, const Value &);
inline bool operator!=(const Value &L, const Value &R) { return !(L == R); }
// Array Methods
@@ -655,7 +655,8 @@ inline bool Object::erase(StringRef K) {
return M.erase(ObjectKey(K));
}
-std::vector<const Object::value_type *> sortedElements(const Object &O);
+LLVM_ABI std::vector<const Object::value_type *>
+sortedElements(const Object &O);
/// A "cursor" marking a position within a Value.
/// The Value is a tree, and this is the path from the root to the current node.
@@ -667,7 +668,7 @@ public:
/// Records that the value at the current path is invalid.
/// Message is e.g. "expected number" and becomes part of the final error.
/// This overwrites any previously written error message in the root.
- void report(llvm::StringLiteral Message);
+ LLVM_ABI void report(llvm::StringLiteral Message);
/// The root may be treated as a Path.
Path(Root &R) : Parent(nullptr), Seg(&R) {}
@@ -712,7 +713,7 @@ class Path::Root {
llvm::StringLiteral ErrorMessage;
std::vector<Path::Segment> ErrorPath; // Only valid in error state. Reversed.
- friend void Path::report(llvm::StringLiteral Message);
+ LLVM_ABI_FRIEND friend void Path::report(llvm::StringLiteral Message);
public:
Root(llvm::StringRef Name = "") : Name(Name), ErrorMessage("") {}
@@ -723,7 +724,7 @@ public:
Root &operator=(const Root &) = delete;
/// Returns the last error reported, or else a generic error.
- Error getError() const;
+ LLVM_ABI Error getError() const;
/// Print the root value with the error shown inline as a comment.
/// Unrelated parts of the value are elided for brevity, e.g.
/// {
@@ -731,7 +732,7 @@ public:
/// "name": /* expected string */ null,
/// "properties": { ... }
/// }
- void printErrorContext(const Value &, llvm::raw_ostream &) const;
+ LLVM_ABI void printErrorContext(const Value &, llvm::raw_ostream &) const;
};
// Standard deserializers are provided for primitive types.
@@ -906,14 +907,14 @@ private:
/// Parses the provided JSON source, or returns a ParseError.
/// The returned Value is self-contained and owns its strings (they do not refer
/// to the original source).
-llvm::Expected<Value> parse(llvm::StringRef JSON);
+LLVM_ABI llvm::Expected<Value> parse(llvm::StringRef JSON);
class ParseError : public llvm::ErrorInfo<ParseError> {
const char *Msg;
unsigned Line, Column, Offset;
public:
- static char ID;
+ LLVM_ABI static char ID;
ParseError(const char *Msg, unsigned Line, unsigned Column, unsigned Offset)
: Msg(Msg), Line(Line), Column(Column), Offset(Offset) {}
void log(llvm::raw_ostream &OS) const override {
@@ -1014,7 +1015,7 @@ class OStream {
// or in an array (any number of times).
/// Emit a self-contained value (number, string, vector<string> etc).
- void value(const Value &V);
+ LLVM_ABI void value(const Value &V);
/// Emit an array whose elements are emitted in the provided Block.
void array(Block Contents) {
arrayBegin();
@@ -1041,7 +1042,7 @@ class OStream {
/// Emit a JavaScript comment associated with the next printed value.
/// The string must be valid until the next attribute or value is emitted.
/// Comments are not part of standard JSON, and many parsers reject them!
- void comment(llvm::StringRef);
+ LLVM_ABI void comment(llvm::StringRef);
// High level functions to output object attributes.
// Valid only within an object (any number of times).
@@ -1062,14 +1063,14 @@ class OStream {
// Low-level begin/end functions to output arrays, objects, and attributes.
// Must be correctly paired. Allowed contexts are as above.
- void arrayBegin();
- void arrayEnd();
- void objectBegin();
- void objectEnd();
- void attributeBegin(llvm::StringRef Key);
- void attributeEnd();
- raw_ostream &rawValueBegin();
- void rawValueEnd();
+ LLVM_ABI void arrayBegin();
+ LLVM_ABI void arrayEnd();
+ LLVM_ABI void objectBegin();
+ LLVM_ABI void objectEnd();
+ LLVM_ABI void attributeBegin(llvm::StringRef Key);
+ LLVM_ABI void attributeEnd();
+ LLVM_ABI raw_ostream &rawValueBegin();
+ LLVM_ABI void rawValueEnd();
private:
void attributeImpl(llvm::StringRef Key, Block Contents) {
@@ -1078,9 +1079,9 @@ private:
attributeEnd();
}
- void valueBegin();
- void flushComment();
- void newline();
+ LLVM_ABI void valueBegin();
+ LLVM_ABI void flushComment();
+ LLVM_ABI void newline();
enum Context {
Singleton, // Top level, or object attribute.
@@ -1112,7 +1113,8 @@ inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Value &V) {
/// The default style is basic/compact formatting, like operator<<.
/// A format string like formatv("{0:2}", Value) pretty-prints with indent 2.
template <> struct format_provider<llvm::json::Value> {
- static void format(const llvm::json::Value &, raw_ostream &, StringRef);
+ LLVM_ABI static void format(const llvm::json::Value &, raw_ostream &,
+ StringRef);
};
} // namespace llvm
diff --git a/llvm/include/llvm/Support/KnownBits.h b/llvm/include/llvm/Support/KnownBits.h
index a4b554f..6a14328 100644
--- a/llvm/include/llvm/Support/KnownBits.h
+++ b/llvm/include/llvm/Support/KnownBits.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_KNOWNBITS_H
#include "llvm/ADT/APInt.h"
+#include "llvm/Support/Compiler.h"
#include <optional>
namespace llvm {
@@ -205,7 +206,7 @@ public:
/// Return known bits for a in-register sign extension of the value we're
/// tracking.
- KnownBits sextInReg(unsigned SrcBitWidth) const;
+ LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const;
/// Insert the bits from a smaller known bits starting at bitPosition.
void insertBits(const KnownBits &SubBits, unsigned BitPosition) {
@@ -228,7 +229,7 @@ public:
/// Return KnownBits based on this, but updated given that the underlying
/// value is known to be greater than or equal to Val.
- KnownBits makeGE(const APInt &Val) const;
+ LLVM_ABI KnownBits makeGE(const APInt &Val) const;
/// Returns the minimum number of trailing zero bits.
unsigned countMinTrailingZeros() const { return Zero.countr_one(); }
@@ -320,17 +321,20 @@ public:
}
/// Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
- static KnownBits computeForAddCarry(
- const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry);
+ LLVM_ABI static KnownBits computeForAddCarry(const KnownBits &LHS,
+ const KnownBits &RHS,
+ const KnownBits &Carry);
/// Compute known bits resulting from adding LHS and RHS.
- static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW,
- const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW,
+ const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute known bits results from subtracting RHS from LHS with 1-bit
/// Borrow.
- static KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS,
- const KnownBits &Borrow);
+ LLVM_ABI static KnownBits computeForSubBorrow(const KnownBits &LHS,
+ KnownBits RHS,
+ const KnownBits &Borrow);
/// Compute knownbits resulting from addition of LHS and RHS.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS,
@@ -345,128 +349,146 @@ public:
}
/// Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
- static KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits sadd_sat(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
- static KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits uadd_sat(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
- static KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits ssub_sat(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
- static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits usub_sat(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from APIntOps::avgFloorS
- static KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits avgFloorS(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from APIntOps::avgFloorU
- static KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits avgFloorU(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from APIntOps::avgCeilS
- static KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits avgCeilS(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute knownbits resulting from APIntOps::avgCeilU
- static KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits avgCeilU(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Compute known bits resulting from multiplying LHS and RHS.
- static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS,
- bool NoUndefSelfMultiply = false);
+ LLVM_ABI static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS,
+ bool NoUndefSelfMultiply = false);
/// Compute known bits from sign-extended multiply-hi.
- static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits from zero-extended multiply-hi.
- static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for sdiv(LHS, RHS).
- static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS,
- bool Exact = false);
+ LLVM_ABI static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS,
+ bool Exact = false);
/// Compute known bits for udiv(LHS, RHS).
- static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS,
- bool Exact = false);
+ LLVM_ABI static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS,
+ bool Exact = false);
/// Compute known bits for urem(LHS, RHS).
- static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for srem(LHS, RHS).
- static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for umax(LHS, RHS).
- static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for umin(LHS, RHS).
- static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for smax(LHS, RHS).
- static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for smin(LHS, RHS).
- static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for abdu(LHS, RHS).
- static KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS);
/// Compute known bits for abds(LHS, RHS).
- static KnownBits abds(KnownBits LHS, KnownBits RHS);
+ LLVM_ABI static KnownBits abds(KnownBits LHS, KnownBits RHS);
/// Compute known bits for shl(LHS, RHS).
/// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
- static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS,
- bool NUW = false, bool NSW = false,
- bool ShAmtNonZero = false);
+ LLVM_ABI static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS,
+ bool NUW = false, bool NSW = false,
+ bool ShAmtNonZero = false);
/// Compute known bits for lshr(LHS, RHS).
/// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
- static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS,
- bool ShAmtNonZero = false, bool Exact = false);
+ LLVM_ABI static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS,
+ bool ShAmtNonZero = false, bool Exact = false);
/// Compute known bits for ashr(LHS, RHS).
/// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
- static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS,
- bool ShAmtNonZero = false, bool Exact = false);
+ LLVM_ABI static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS,
+ bool ShAmtNonZero = false, bool Exact = false);
/// Determine if these known bits always give the same ICMP_EQ result.
- static std::optional<bool> eq(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> eq(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_NE result.
- static std::optional<bool> ne(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> ne(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_UGT result.
- static std::optional<bool> ugt(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> ugt(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_UGE result.
- static std::optional<bool> uge(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> uge(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_ULT result.
- static std::optional<bool> ult(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> ult(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_ULE result.
- static std::optional<bool> ule(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> ule(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_SGT result.
- static std::optional<bool> sgt(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> sgt(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_SGE result.
- static std::optional<bool> sge(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> sge(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_SLT result.
- static std::optional<bool> slt(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> slt(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Determine if these known bits always give the same ICMP_SLE result.
- static std::optional<bool> sle(const KnownBits &LHS, const KnownBits &RHS);
+ LLVM_ABI static std::optional<bool> sle(const KnownBits &LHS,
+ const KnownBits &RHS);
/// Update known bits based on ANDing with RHS.
- KnownBits &operator&=(const KnownBits &RHS);
+ LLVM_ABI KnownBits &operator&=(const KnownBits &RHS);
/// Update known bits based on ORing with RHS.
- KnownBits &operator|=(const KnownBits &RHS);
+ LLVM_ABI KnownBits &operator|=(const KnownBits &RHS);
/// Update known bits based on XORing with RHS.
- KnownBits &operator^=(const KnownBits &RHS);
+ LLVM_ABI KnownBits &operator^=(const KnownBits &RHS);
/// Compute known bits for the absolute value.
- KnownBits abs(bool IntMinIsPoison = false) const;
+ LLVM_ABI KnownBits abs(bool IntMinIsPoison = false) const;
KnownBits byteSwap() const {
return KnownBits(Zero.byteSwap(), One.byteSwap());
@@ -478,11 +500,11 @@ public:
/// Compute known bits for X & -X, which has only the lowest bit set of X set.
/// The name comes from the X86 BMI instruction
- KnownBits blsi() const;
+ LLVM_ABI KnownBits blsi() const;
/// Compute known bits for X ^ (X - 1), which has all bits up to and including
/// the lowest set bit of X set. The name comes from the X86 BMI instruction.
- KnownBits blsmsk() const;
+ LLVM_ABI KnownBits blsmsk() const;
bool operator==(const KnownBits &Other) const {
return Zero == Other.Zero && One == Other.One;
@@ -490,8 +512,8 @@ public:
bool operator!=(const KnownBits &Other) const { return !(*this == Other); }
- void print(raw_ostream &OS) const;
- void dump() const;
+ LLVM_ABI void print(raw_ostream &OS) const;
+ LLVM_ABI void dump() const;
private:
// Internal helper for getting the initial KnownBits for an `srem` or `urem`
diff --git a/llvm/include/llvm/Support/LEB128.h b/llvm/include/llvm/Support/LEB128.h
index a15b73b..ce789cc 100644
--- a/llvm/include/llvm/Support/LEB128.h
+++ b/llvm/include/llvm/Support/LEB128.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SUPPORT_LEB128_H
#define LLVM_SUPPORT_LEB128_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -221,10 +222,10 @@ inline uint64_t decodeULEB128AndIncUnsafe(const uint8_t *&p) {
}
/// Utility function to get the size of the ULEB128-encoded value.
-extern unsigned getULEB128Size(uint64_t Value);
+LLVM_ABI extern unsigned getULEB128Size(uint64_t Value);
/// Utility function to get the size of the SLEB128-encoded value.
-extern unsigned getSLEB128Size(int64_t Value);
+LLVM_ABI extern unsigned getSLEB128Size(int64_t Value);
} // namespace llvm
diff --git a/llvm/include/llvm/Support/LineIterator.h b/llvm/include/llvm/Support/LineIterator.h
index fc6871b..18b11dac 100644
--- a/llvm/include/llvm/Support/LineIterator.h
+++ b/llvm/include/llvm/Support/LineIterator.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_LINEITERATOR_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <iterator>
@@ -49,12 +50,14 @@ public:
line_iterator() = default;
/// Construct a new iterator around an unowned memory buffer.
- explicit line_iterator(const MemoryBufferRef &Buffer, bool SkipBlanks = true,
- char CommentMarker = '\0');
+ LLVM_ABI explicit line_iterator(const MemoryBufferRef &Buffer,
+ bool SkipBlanks = true,
+ char CommentMarker = '\0');
/// Construct a new iterator around some memory buffer.
- explicit line_iterator(const MemoryBuffer &Buffer, bool SkipBlanks = true,
- char CommentMarker = '\0');
+ LLVM_ABI explicit line_iterator(const MemoryBuffer &Buffer,
+ bool SkipBlanks = true,
+ char CommentMarker = '\0');
/// Return true if we've reached EOF or are an "end" iterator.
bool is_at_eof() const { return !Buffer; }
@@ -91,7 +94,7 @@ public:
private:
/// Advance the iterator to the next line.
- void advance();
+ LLVM_ABI void advance();
};
}
diff --git a/llvm/include/llvm/Support/Locale.h b/llvm/include/llvm/Support/Locale.h
index f7a2c03..8163dfe 100644
--- a/llvm/include/llvm/Support/Locale.h
+++ b/llvm/include/llvm/Support/Locale.h
@@ -1,15 +1,16 @@
#ifndef LLVM_SUPPORT_LOCALE_H
#define LLVM_SUPPORT_LOCALE_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class StringRef;
namespace sys {
namespace locale {
-int columnWidth(StringRef s);
-bool isPrint(int c);
-
+LLVM_ABI int columnWidth(StringRef s);
+LLVM_ABI bool isPrint(int c);
}
}
}
diff --git a/llvm/include/llvm/Support/LockFileManager.h b/llvm/include/llvm/Support/LockFileManager.h
index a126fa3..1c579ea 100644
--- a/llvm/include/llvm/Support/LockFileManager.h
+++ b/llvm/include/llvm/Support/LockFileManager.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/AdvisoryLock.h"
+#include "llvm/Support/Compiler.h"
#include <optional>
#include <string>
#include <variant>
@@ -23,7 +24,7 @@ namespace llvm {
/// atomicity of the file system to ensure that only a single process can create
/// that ".lock" file. When the lock file is removed, the owning process has
/// finished the operation.
-class LockFileManager : public AdvisoryLock {
+class LLVM_ABI LockFileManager : public AdvisoryLock {
SmallString<128> FileName;
SmallString<128> LockFileName;
SmallString<128> UniqueLockFileName;
diff --git a/llvm/include/llvm/Support/MD5.h b/llvm/include/llvm/Support/MD5.h
index 0e9f22d..66e2119 100644
--- a/llvm/include/llvm/Support/MD5.h
+++ b/llvm/include/llvm/Support/MD5.h
@@ -29,6 +29,7 @@
#define LLVM_SUPPORT_MD5_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include <array>
#include <cstdint>
@@ -41,7 +42,7 @@ template <typename T> class ArrayRef;
class MD5 {
public:
struct MD5Result : public std::array<uint8_t, 16> {
- SmallString<32> digest() const;
+ LLVM_ABI SmallString<32> digest() const;
uint64_t low() const {
// Our MD5 implementation returns the result in little endian, so the low
@@ -60,31 +61,32 @@ public:
}
};
- MD5();
+ LLVM_ABI MD5();
/// Updates the hash for the byte stream provided.
- void update(ArrayRef<uint8_t> Data);
+ LLVM_ABI void update(ArrayRef<uint8_t> Data);
/// Updates the hash for the StringRef provided.
- void update(StringRef Str);
+ LLVM_ABI void update(StringRef Str);
/// Finishes off the hash and puts the result in result.
- void final(MD5Result &Result);
+ LLVM_ABI void final(MD5Result &Result);
/// Finishes off the hash, and returns the 16-byte hash data.
- MD5Result final();
+ LLVM_ABI MD5Result final();
/// Finishes off the hash, and returns the 16-byte hash data.
/// This is suitable for getting the MD5 at any time without invalidating the
/// internal state, so that more calls can be made into `update`.
- MD5Result result();
+ LLVM_ABI MD5Result result();
/// Translates the bytes in \p Res to a hex string that is
/// deposited into \p Str. The result will be of length 32.
- static void stringifyResult(MD5Result &Result, SmallVectorImpl<char> &Str);
+ LLVM_ABI static void stringifyResult(MD5Result &Result,
+ SmallVectorImpl<char> &Str);
/// Computes the hash for a given bytes.
- static MD5Result hash(ArrayRef<uint8_t> Data);
+ LLVM_ABI static MD5Result hash(ArrayRef<uint8_t> Data);
private:
// Any 32-bit or wider unsigned integer data type will do.
@@ -102,7 +104,7 @@ private:
MD5_u32plus block[16];
} InternalState;
- const uint8_t *body(ArrayRef<uint8_t> Data);
+ LLVM_ABI const uint8_t *body(ArrayRef<uint8_t> Data);
};
/// Helper to compute and return lower 64 bits of the given string's MD5 hash.
diff --git a/llvm/include/llvm/Support/MSP430AttributeParser.h b/llvm/include/llvm/Support/MSP430AttributeParser.h
index 3a4f1b4..7e5271ca 100644
--- a/llvm/include/llvm/Support/MSP430AttributeParser.h
+++ b/llvm/include/llvm/Support/MSP430AttributeParser.h
@@ -14,11 +14,12 @@
#ifndef LLVM_SUPPORT_MSP430ATTRIBUTEPARSER_H
#define LLVM_SUPPORT_MSP430ATTRIBUTEPARSER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttrParserCompact.h"
#include "llvm/Support/MSP430Attributes.h"
namespace llvm {
-class MSP430AttributeParser : public ELFCompactAttrParser {
+class LLVM_ABI MSP430AttributeParser : public ELFCompactAttrParser {
struct DisplayHandler {
MSP430Attrs::AttrType Attribute;
Error (MSP430AttributeParser::*Routine)(MSP430Attrs::AttrType);
diff --git a/llvm/include/llvm/Support/MSP430Attributes.h b/llvm/include/llvm/Support/MSP430Attributes.h
index fccd65e..4cd57b7 100644
--- a/llvm/include/llvm/Support/MSP430Attributes.h
+++ b/llvm/include/llvm/Support/MSP430Attributes.h
@@ -18,12 +18,13 @@
#ifndef LLVM_SUPPORT_MSP430ATTRIBUTES_H
#define LLVM_SUPPORT_MSP430ATTRIBUTES_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttributes.h"
namespace llvm {
namespace MSP430Attrs {
-const TagNameMap &getMSP430AttributeTags();
+LLVM_ABI const TagNameMap &getMSP430AttributeTags();
enum AttrType : unsigned {
// Attribute types in ELF/.MSP430.attributes.
diff --git a/llvm/include/llvm/Support/ManagedStatic.h b/llvm/include/llvm/Support/ManagedStatic.h
index f2b4142..1185b8e7 100644
--- a/llvm/include/llvm/Support/ManagedStatic.h
+++ b/llvm/include/llvm/Support/ManagedStatic.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_MANAGEDSTATIC_H
#define LLVM_SUPPORT_MANAGEDSTATIC_H
+#include "llvm/Support/Compiler.h"
#include <atomic>
#include <cstddef>
@@ -60,7 +61,8 @@ protected:
mutable const ManagedStaticBase *Next;
#endif
- void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const;
+ LLVM_ABI void RegisterManagedStatic(void *(*creator)(),
+ void (*deleter)(void *)) const;
public:
#ifdef LLVM_USE_CONSTEXPR_CTOR
@@ -70,7 +72,7 @@ public:
/// isConstructed - Return true if this object has not been created yet.
bool isConstructed() const { return Ptr != nullptr; }
- void destroy() const;
+ LLVM_ABI void destroy() const;
};
/// ManagedStatic - This transparently changes the behavior of global statics to
@@ -111,7 +113,7 @@ public:
};
/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
-void llvm_shutdown();
+LLVM_ABI void llvm_shutdown();
/// llvm_shutdown_obj - This is a simple helper class that calls
/// llvm_shutdown() when it is destroyed.
diff --git a/llvm/include/llvm/Support/MathExtras.h b/llvm/include/llvm/Support/MathExtras.h
index 519fcc8..246080a 100644
--- a/llvm/include/llvm/Support/MathExtras.h
+++ b/llvm/include/llvm/Support/MathExtras.h
@@ -703,7 +703,7 @@ SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
}
/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
-extern const float huge_valf;
+LLVM_ABI extern const float huge_valf;
/// Add two signed integers, computing the two's complement truncated result,
/// returning true if overflow occurred.
diff --git a/llvm/include/llvm/Support/MemAlloc.h b/llvm/include/llvm/Support/MemAlloc.h
index f3f378b..3b086cb 100644
--- a/llvm/include/llvm/Support/MemAlloc.h
+++ b/llvm/include/llvm/Support/MemAlloc.h
@@ -71,7 +71,7 @@ LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_realloc(void *Ptr, size_t Sz) {
/// like posix_memalign due to portability. It is mostly intended to allow
/// compatibility with platforms that, after aligned allocation was added, use
/// reduced default alignment.
-LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
+LLVM_ABI LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
allocate_buffer(size_t Size, size_t Alignment);
/// Deallocate a buffer of memory with the given size and alignment.
@@ -81,7 +81,7 @@ allocate_buffer(size_t Size, size_t Alignment);
///
/// The pointer must have been allocated with the corresponding new operator,
/// most likely using the above helper.
-void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment);
+LLVM_ABI void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment);
} // namespace llvm
#endif
diff --git a/llvm/include/llvm/Support/Memory.h b/llvm/include/llvm/Support/Memory.h
index a587f2a..3cee770 100644
--- a/llvm/include/llvm/Support/Memory.h
+++ b/llvm/include/llvm/Support/Memory.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_MEMORY_H
#define LLVM_SUPPORT_MEMORY_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <system_error>
#include <utility>
@@ -95,10 +96,9 @@ namespace sys {
/// otherwise a null MemoryBlock is with \p EC describing the error.
///
/// Allocate mapped memory.
- static MemoryBlock allocateMappedMemory(size_t NumBytes,
- const MemoryBlock *const NearBlock,
- unsigned Flags,
- std::error_code &EC);
+ LLVM_ABI static MemoryBlock
+ allocateMappedMemory(size_t NumBytes, const MemoryBlock *const NearBlock,
+ unsigned Flags, std::error_code &EC);
/// This method releases a block of memory that was allocated with the
/// allocateMappedMemory method. It should not be used to release any
@@ -109,7 +109,7 @@ namespace sys {
/// describing the failure if an error occurred.
///
/// Release mapped memory.
- static std::error_code releaseMappedMemory(MemoryBlock &Block);
+ LLVM_ABI static std::error_code releaseMappedMemory(MemoryBlock &Block);
/// This method sets the protection flags for a block of memory to the
/// state specified by /p Flags. The behavior is not specified if the
@@ -125,13 +125,14 @@ namespace sys {
/// describing the failure if an error occurred.
///
/// Set memory protection state.
- static std::error_code protectMappedMemory(const MemoryBlock &Block,
- unsigned Flags);
+ LLVM_ABI static std::error_code
+ protectMappedMemory(const MemoryBlock &Block, unsigned Flags);
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
- static void InvalidateInstructionCache(const void *Addr, size_t Len);
+ LLVM_ABI static void InvalidateInstructionCache(const void *Addr,
+ size_t Len);
};
/// Owning version of MemoryBlock.
diff --git a/llvm/include/llvm/Support/MemoryBuffer.h b/llvm/include/llvm/Support/MemoryBuffer.h
index b3477f1d..f092c67 100644
--- a/llvm/include/llvm/Support/MemoryBuffer.h
+++ b/llvm/include/llvm/Support/MemoryBuffer.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <cstddef>
@@ -48,7 +49,7 @@ using file_t = int;
/// be more efficient for clients which are reading all the data to stop
/// reading when they encounter a '\0' than to continually check the file
/// position to see if it has reached the end of the file.
-class MemoryBuffer {
+class LLVM_ABI MemoryBuffer {
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
@@ -199,12 +200,12 @@ public:
return {getBufferStart(), getBufferEnd()};
}
- static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
+ LLVM_ABI static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
getFile(const Twine &Filename, bool IsVolatile = false,
std::optional<Align> Alignment = std::nullopt);
/// Map a subrange of the specified file as a WritableMemoryBuffer.
- static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
+ LLVM_ABI static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset,
bool IsVolatile = false,
std::optional<Align> Alignment = std::nullopt);
@@ -215,14 +216,14 @@ public:
///
/// \param Alignment Set to indicate that the buffer should be aligned to at
/// least the specified alignment.
- static std::unique_ptr<WritableMemoryBuffer>
+ LLVM_ABI static std::unique_ptr<WritableMemoryBuffer>
getNewUninitMemBuffer(size_t Size, const Twine &BufferName = "",
std::optional<Align> Alignment = std::nullopt);
/// Allocate a new zero-initialized MemoryBuffer of the specified size. Note
/// that the caller need not initialize the memory allocated by this method.
/// The memory is owned by the MemoryBuffer object.
- static std::unique_ptr<WritableMemoryBuffer>
+ LLVM_ABI static std::unique_ptr<WritableMemoryBuffer>
getNewMemBuffer(size_t Size, const Twine &BufferName = "");
private:
@@ -263,11 +264,11 @@ public:
return {getBufferStart(), getBufferEnd()};
}
- static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
+ LLVM_ABI static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
getFile(const Twine &Filename, int64_t FileSize = -1);
/// Map a subrange of the specified file as a ReadWriteMemoryBuffer.
- static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
+ LLVM_ABI static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset);
private:
diff --git a/llvm/include/llvm/Support/MemoryBufferRef.h b/llvm/include/llvm/Support/MemoryBufferRef.h
index b38a1f3..767bcca 100644
--- a/llvm/include/llvm/Support/MemoryBufferRef.h
+++ b/llvm/include/llvm/Support/MemoryBufferRef.h
@@ -14,6 +14,7 @@
#define LLVM_SUPPORT_MEMORYBUFFERREF_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -25,7 +26,7 @@ class MemoryBufferRef {
public:
MemoryBufferRef() = default;
- MemoryBufferRef(const MemoryBuffer &Buffer);
+ LLVM_ABI MemoryBufferRef(const MemoryBuffer &Buffer);
MemoryBufferRef(StringRef Buffer, StringRef Identifier)
: Buffer(Buffer), Identifier(Identifier) {}
diff --git a/llvm/include/llvm/Support/ModRef.h b/llvm/include/llvm/Support/ModRef.h
index 677c0a2..71f3b5b 100644
--- a/llvm/include/llvm/Support/ModRef.h
+++ b/llvm/include/llvm/Support/ModRef.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/Sequence.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -53,7 +54,7 @@ enum class ModRefInfo : uint8_t {
}
/// Debug print ModRefInfo.
-raw_ostream &operator<<(raw_ostream &OS, ModRefInfo MR);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, ModRefInfo MR);
/// The locations at which a function might access memory.
enum class IRMemLocation {
@@ -295,7 +296,7 @@ public:
using MemoryEffects = MemoryEffectsBase<IRMemLocation>;
/// Debug print MemoryEffects.
-raw_ostream &operator<<(raw_ostream &OS, MemoryEffects RMRB);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, MemoryEffects RMRB);
// Legacy alias.
using FunctionModRefBehavior = MemoryEffects;
@@ -344,7 +345,7 @@ inline bool capturesAll(CaptureComponents CC) {
return CC == CaptureComponents::All;
}
-raw_ostream &operator<<(raw_ostream &OS, CaptureComponents CC);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, CaptureComponents CC);
/// Represents which components of the pointer may be captured in which
/// location. This represents the captures(...) attribute in IR.
@@ -433,7 +434,7 @@ public:
}
};
-raw_ostream &operator<<(raw_ostream &OS, CaptureInfo Info);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, CaptureInfo Info);
} // namespace llvm
diff --git a/llvm/include/llvm/Support/Mustache.h b/llvm/include/llvm/Support/Mustache.h
index 9cd9673..781ec55 100644
--- a/llvm/include/llvm/Support/Mustache.h
+++ b/llvm/include/llvm/Support/Mustache.h
@@ -72,6 +72,7 @@
#include "Error.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/StringSaver.h"
#include <functional>
@@ -89,32 +90,32 @@ using AstPtr = std::unique_ptr<ASTNode>;
// and Lambdas that are registered with it.
class Template {
public:
- Template(StringRef TemplateStr);
+ LLVM_ABI Template(StringRef TemplateStr);
Template(const Template &) = delete;
Template &operator=(const Template &) = delete;
- Template(Template &&Other) noexcept;
+ LLVM_ABI Template(Template &&Other) noexcept;
// Define this in the cpp file to work around ASTNode being an incomplete
// type.
- ~Template();
+ LLVM_ABI ~Template();
- Template &operator=(Template &&Other) noexcept;
+ LLVM_ABI Template &operator=(Template &&Other) noexcept;
- void render(const llvm::json::Value &Data, llvm::raw_ostream &OS);
+ LLVM_ABI void render(const llvm::json::Value &Data, llvm::raw_ostream &OS);
- void registerPartial(std::string Name, std::string Partial);
+ LLVM_ABI void registerPartial(std::string Name, std::string Partial);
- void registerLambda(std::string Name, Lambda Lambda);
+ LLVM_ABI void registerLambda(std::string Name, Lambda Lambda);
- void registerLambda(std::string Name, SectionLambda Lambda);
+ LLVM_ABI void registerLambda(std::string Name, SectionLambda Lambda);
// By default the Mustache Spec Specifies that HTML special characters
// should be escaped. This function allows the user to specify which
// characters should be escaped.
- void overrideEscapeCharacters(DenseMap<char, std::string> Escapes);
+ LLVM_ABI void overrideEscapeCharacters(DenseMap<char, std::string> Escapes);
private:
StringMap<AstPtr> Partials;
diff --git a/llvm/include/llvm/Support/NativeFormatting.h b/llvm/include/llvm/Support/NativeFormatting.h
index ab85ae2..4533651 100644
--- a/llvm/include/llvm/Support/NativeFormatting.h
+++ b/llvm/include/llvm/Support/NativeFormatting.h
@@ -9,6 +9,7 @@
#ifndef LLVM_SUPPORT_NATIVEFORMATTING_H
#define LLVM_SUPPORT_NATIVEFORMATTING_H
+#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <optional>
@@ -21,26 +22,27 @@ enum class IntegerStyle {
};
enum class HexPrintStyle { Upper, Lower, PrefixUpper, PrefixLower };
-size_t getDefaultPrecision(FloatStyle Style);
-
-bool isPrefixedHexStyle(HexPrintStyle S);
-
-void write_integer(raw_ostream &S, unsigned int N, size_t MinDigits,
- IntegerStyle Style);
-void write_integer(raw_ostream &S, int N, size_t MinDigits, IntegerStyle Style);
-void write_integer(raw_ostream &S, unsigned long N, size_t MinDigits,
- IntegerStyle Style);
-void write_integer(raw_ostream &S, long N, size_t MinDigits,
- IntegerStyle Style);
-void write_integer(raw_ostream &S, unsigned long long N, size_t MinDigits,
- IntegerStyle Style);
-void write_integer(raw_ostream &S, long long N, size_t MinDigits,
- IntegerStyle Style);
-
-void write_hex(raw_ostream &S, uint64_t N, HexPrintStyle Style,
- std::optional<size_t> Width = std::nullopt);
-void write_double(raw_ostream &S, double D, FloatStyle Style,
- std::optional<size_t> Precision = std::nullopt);
+LLVM_ABI size_t getDefaultPrecision(FloatStyle Style);
+
+LLVM_ABI bool isPrefixedHexStyle(HexPrintStyle S);
+
+LLVM_ABI void write_integer(raw_ostream &S, unsigned int N, size_t MinDigits,
+ IntegerStyle Style);
+LLVM_ABI void write_integer(raw_ostream &S, int N, size_t MinDigits,
+ IntegerStyle Style);
+LLVM_ABI void write_integer(raw_ostream &S, unsigned long N, size_t MinDigits,
+ IntegerStyle Style);
+LLVM_ABI void write_integer(raw_ostream &S, long N, size_t MinDigits,
+ IntegerStyle Style);
+LLVM_ABI void write_integer(raw_ostream &S, unsigned long long N,
+ size_t MinDigits, IntegerStyle Style);
+LLVM_ABI void write_integer(raw_ostream &S, long long N, size_t MinDigits,
+ IntegerStyle Style);
+
+LLVM_ABI void write_hex(raw_ostream &S, uint64_t N, HexPrintStyle Style,
+ std::optional<size_t> Width = std::nullopt);
+LLVM_ABI void write_double(raw_ostream &S, double D, FloatStyle Style,
+ std::optional<size_t> Precision = std::nullopt);
}
#endif
diff --git a/llvm/include/llvm/Support/OptimizedStructLayout.h b/llvm/include/llvm/Support/OptimizedStructLayout.h
index 619990d..8c11ae1 100644
--- a/llvm/include/llvm/Support/OptimizedStructLayout.h
+++ b/llvm/include/llvm/Support/OptimizedStructLayout.h
@@ -35,8 +35,9 @@
#ifndef LLVM_SUPPORT_OPTIMIZEDSTRUCTLAYOUT_H
#define LLVM_SUPPORT_OPTIMIZEDSTRUCTLAYOUT_H
-#include "llvm/Support/Alignment.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/Compiler.h"
#include <utility>
namespace llvm {
@@ -135,8 +136,8 @@ struct OptimizedStructLayoutField {
/// The return value is the total size of the struct and its required
/// alignment. Note that the total size is not rounded up to a multiple
/// of the required alignment; clients which require this can do so easily.
-std::pair<uint64_t, Align> performOptimizedStructLayout(
- MutableArrayRef<OptimizedStructLayoutField> Fields);
+LLVM_ABI std::pair<uint64_t, Align> performOptimizedStructLayout(
+ MutableArrayRef<OptimizedStructLayoutField> Fields);
} // namespace llvm
diff --git a/llvm/include/llvm/Support/PGOOptions.h b/llvm/include/llvm/Support/PGOOptions.h
index de981ab..6527a18 100644
--- a/llvm/include/llvm/Support/PGOOptions.h
+++ b/llvm/include/llvm/Support/PGOOptions.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_PGOOPTIONS_H
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -28,17 +29,19 @@ struct PGOOptions {
enum PGOAction { NoAction, IRInstr, IRUse, SampleUse };
enum CSPGOAction { NoCSAction, CSIRInstr, CSIRUse };
enum class ColdFuncOpt { Default, OptSize, MinSize, OptNone };
- PGOOptions(std::string ProfileFile, std::string CSProfileGenFile,
- std::string ProfileRemappingFile, std::string MemoryProfile,
- IntrusiveRefCntPtr<vfs::FileSystem> FS,
- PGOAction Action = NoAction, CSPGOAction CSAction = NoCSAction,
- ColdFuncOpt ColdType = ColdFuncOpt::Default,
- bool DebugInfoForProfiling = false,
- bool PseudoProbeForProfiling = false,
- bool AtomicCounterUpdate = false);
- PGOOptions(const PGOOptions &);
- ~PGOOptions();
- PGOOptions &operator=(const PGOOptions &);
+ LLVM_ABI PGOOptions(std::string ProfileFile, std::string CSProfileGenFile,
+ std::string ProfileRemappingFile,
+ std::string MemoryProfile,
+ IntrusiveRefCntPtr<vfs::FileSystem> FS,
+ PGOAction Action = NoAction,
+ CSPGOAction CSAction = NoCSAction,
+ ColdFuncOpt ColdType = ColdFuncOpt::Default,
+ bool DebugInfoForProfiling = false,
+ bool PseudoProbeForProfiling = false,
+ bool AtomicCounterUpdate = false);
+ LLVM_ABI PGOOptions(const PGOOptions &);
+ LLVM_ABI ~PGOOptions();
+ LLVM_ABI PGOOptions &operator=(const PGOOptions &);
std::string ProfileFile;
std::string CSProfileGenFile;
diff --git a/llvm/include/llvm/Support/Parallel.h b/llvm/include/llvm/Support/Parallel.h
index c34619a..b0c9e8f 100644
--- a/llvm/include/llvm/Support/Parallel.h
+++ b/llvm/include/llvm/Support/Parallel.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Threading.h"
@@ -27,7 +28,7 @@ namespace parallel {
// Strategy for the default executor used by the parallel routines provided by
// this file. It defaults to using all hardware threads and should be
// initialized before the first use of parallel routines.
-extern ThreadPoolStrategy strategy;
+LLVM_ABI extern ThreadPoolStrategy strategy;
#if LLVM_ENABLE_THREADS
#define GET_THREAD_INDEX_IMPL \
@@ -41,15 +42,15 @@ extern ThreadPoolStrategy strategy;
#ifdef _WIN32
// Direct access to thread_local variables from a different DLL isn't
// possible with Windows Native TLS.
-unsigned getThreadIndex();
+LLVM_ABI unsigned getThreadIndex();
#else
// Don't access this directly, use the getThreadIndex wrapper.
-extern thread_local unsigned threadIndex;
+LLVM_ABI extern thread_local unsigned threadIndex;
inline unsigned getThreadIndex() { GET_THREAD_INDEX_IMPL; }
#endif
-size_t getThreadCount();
+LLVM_ABI size_t getThreadCount();
#else
inline unsigned getThreadIndex() { return 0; }
inline size_t getThreadCount() { return 1; }
@@ -91,13 +92,13 @@ class TaskGroup {
bool Parallel;
public:
- TaskGroup();
- ~TaskGroup();
+ LLVM_ABI TaskGroup();
+ LLVM_ABI ~TaskGroup();
// Spawn a task, but does not wait for it to finish.
// Tasks marked with \p Sequential will be executed
// exactly in the order which they were spawned.
- void spawn(std::function<void()> f);
+ LLVM_ABI void spawn(std::function<void()> f);
void sync() const { L.sync(); }
@@ -225,7 +226,8 @@ void parallelSort(RandomAccessIterator Start, RandomAccessIterator End,
llvm::sort(Start, End, Comp);
}
-void parallelFor(size_t Begin, size_t End, function_ref<void(size_t)> Fn);
+LLVM_ABI void parallelFor(size_t Begin, size_t End,
+ function_ref<void(size_t)> Fn);
template <class IterTy, class FuncTy>
void parallelForEach(IterTy Begin, IterTy End, FuncTy Fn) {
diff --git a/llvm/include/llvm/Support/Path.h b/llvm/include/llvm/Support/Path.h
index ce549a9..32144a0 100644
--- a/llvm/include/llvm/Support/Path.h
+++ b/llvm/include/llvm/Support/Path.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <iterator>
@@ -79,16 +80,16 @@ class const_iterator
Style S = Style::native; ///< The path style to use.
// An end iterator has Position = Path.size() + 1.
- friend const_iterator begin(StringRef path, Style style);
- friend const_iterator end(StringRef path);
+ LLVM_ABI_FRIEND friend const_iterator begin(StringRef path, Style style);
+ LLVM_ABI_FRIEND friend const_iterator end(StringRef path);
public:
reference operator*() const { return Component; }
- const_iterator &operator++(); // preincrement
- bool operator==(const const_iterator &RHS) const;
+ LLVM_ABI const_iterator &operator++(); // preincrement
+ LLVM_ABI bool operator==(const const_iterator &RHS) const;
/// Difference in bytes between this and RHS.
- ptrdiff_t operator-(const const_iterator &RHS) const;
+ LLVM_ABI ptrdiff_t operator-(const const_iterator &RHS) const;
};
/// Reverse path iterator.
@@ -104,39 +105,39 @@ class reverse_iterator
size_t Position = 0; ///< The iterators current position within Path.
Style S = Style::native; ///< The path style to use.
- friend reverse_iterator rbegin(StringRef path, Style style);
- friend reverse_iterator rend(StringRef path);
+ LLVM_ABI_FRIEND friend reverse_iterator rbegin(StringRef path, Style style);
+ LLVM_ABI_FRIEND friend reverse_iterator rend(StringRef path);
public:
reference operator*() const { return Component; }
- reverse_iterator &operator++(); // preincrement
- bool operator==(const reverse_iterator &RHS) const;
+ LLVM_ABI reverse_iterator &operator++(); // preincrement
+ LLVM_ABI bool operator==(const reverse_iterator &RHS) const;
/// Difference in bytes between this and RHS.
- ptrdiff_t operator-(const reverse_iterator &RHS) const;
+ LLVM_ABI ptrdiff_t operator-(const reverse_iterator &RHS) const;
};
/// Get begin iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized with the first component of \a path.
-const_iterator begin(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI const_iterator begin(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get end iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized to the end of \a path.
-const_iterator end(StringRef path LLVM_LIFETIME_BOUND);
+LLVM_ABI const_iterator end(StringRef path LLVM_LIFETIME_BOUND);
/// Get reverse begin iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized with the first reverse component of \a path.
-reverse_iterator rbegin(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI reverse_iterator rbegin(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get reverse end iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized to the reverse end of \a path.
-reverse_iterator rend(StringRef path LLVM_LIFETIME_BOUND);
+LLVM_ABI reverse_iterator rend(StringRef path LLVM_LIFETIME_BOUND);
/// @}
/// @name Lexical Modifiers
@@ -154,7 +155,8 @@ reverse_iterator rend(StringRef path LLVM_LIFETIME_BOUND);
/// @endcode
///
/// @param path A path that is modified to not have a file component.
-void remove_filename(SmallVectorImpl<char> &path, Style style = Style::native);
+LLVM_ABI void remove_filename(SmallVectorImpl<char> &path,
+ Style style = Style::native);
/// Replace the file extension of \a path with \a extension.
///
@@ -168,8 +170,9 @@ void remove_filename(SmallVectorImpl<char> &path, Style style = Style::native);
/// @param extension The extension to be added. It may be empty. It may also
/// optionally start with a '.', if it does not, one will be
/// prepended.
-void replace_extension(SmallVectorImpl<char> &path, const Twine &extension,
- Style style = Style::native);
+LLVM_ABI void replace_extension(SmallVectorImpl<char> &path,
+ const Twine &extension,
+ Style style = Style::native);
/// Replace matching path prefix with another path.
///
@@ -193,16 +196,16 @@ void replace_extension(SmallVectorImpl<char> &path, const Twine &extension,
/// @param style The style used to match the prefix. Exact match using
/// Posix style, case/separator insensitive match for Windows style.
/// @result true if \a Path begins with OldPrefix
-bool replace_path_prefix(SmallVectorImpl<char> &Path, StringRef OldPrefix,
- StringRef NewPrefix,
- Style style = Style::native);
+LLVM_ABI bool replace_path_prefix(SmallVectorImpl<char> &Path,
+ StringRef OldPrefix, StringRef NewPrefix,
+ Style style = Style::native);
/// Remove redundant leading "./" pieces and consecutive separators.
///
/// @param path Input path.
/// @result The cleaned-up \a path.
-StringRef remove_leading_dotslash(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef remove_leading_dotslash(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// In-place remove any './' and optionally '../' components from a path.
///
@@ -210,8 +213,9 @@ StringRef remove_leading_dotslash(StringRef path LLVM_LIFETIME_BOUND,
/// @param remove_dot_dot specify if '../' (except for leading "../") should be
/// removed
/// @result True if path was changed
-bool remove_dots(SmallVectorImpl<char> &path, bool remove_dot_dot = false,
- Style style = Style::native);
+LLVM_ABI bool remove_dots(SmallVectorImpl<char> &path,
+ bool remove_dot_dot = false,
+ Style style = Style::native);
/// Append to path.
///
@@ -223,13 +227,13 @@ bool remove_dots(SmallVectorImpl<char> &path, bool remove_dot_dot = false,
///
/// @param path Set to \a path + \a component.
/// @param a The component to be appended to \a path.
-void append(SmallVectorImpl<char> &path, const Twine &a,
- const Twine &b = "",
- const Twine &c = "",
- const Twine &d = "");
+LLVM_ABI void append(SmallVectorImpl<char> &path, const Twine &a,
+ const Twine &b = "", const Twine &c = "",
+ const Twine &d = "");
-void append(SmallVectorImpl<char> &path, Style style, const Twine &a,
- const Twine &b = "", const Twine &c = "", const Twine &d = "");
+LLVM_ABI void append(SmallVectorImpl<char> &path, Style style, const Twine &a,
+ const Twine &b = "", const Twine &c = "",
+ const Twine &d = "");
/// Append to path.
///
@@ -242,8 +246,8 @@ void append(SmallVectorImpl<char> &path, Style style, const Twine &a,
/// @param path Set to \a path + [\a begin, \a end).
/// @param begin Start of components to append.
/// @param end One past the end of components to append.
-void append(SmallVectorImpl<char> &path, const_iterator begin,
- const_iterator end, Style style = Style::native);
+LLVM_ABI void append(SmallVectorImpl<char> &path, const_iterator begin,
+ const_iterator end, Style style = Style::native);
/// @}
/// @name Transforms (or some other better name)
@@ -255,15 +259,15 @@ void append(SmallVectorImpl<char> &path, const_iterator begin,
///
/// @param path A path that is transformed to native format.
/// @param result Holds the result of the transformation.
-void native(const Twine &path, SmallVectorImpl<char> &result,
- Style style = Style::native);
+LLVM_ABI void native(const Twine &path, SmallVectorImpl<char> &result,
+ Style style = Style::native);
/// Convert path to the native form in place. This is used to give paths to
/// users and operating system calls in the platform's normal way. For example,
/// on Windows all '/' are converted to '\'.
///
/// @param path A path that is transformed to native format.
-void native(SmallVectorImpl<char> &path, Style style = Style::native);
+LLVM_ABI void native(SmallVectorImpl<char> &path, Style style = Style::native);
/// For Windows path styles, convert path to use the preferred path separators.
/// For other styles, do nothing.
@@ -282,7 +286,8 @@ inline void make_preferred(SmallVectorImpl<char> &path,
/// @result The result of replacing backslashes with forward slashes if Windows.
/// On Unix, this function is a no-op because backslashes are valid path
/// chracters.
-std::string convert_to_slash(StringRef path, Style style = Style::native);
+LLVM_ABI std::string convert_to_slash(StringRef path,
+ Style style = Style::native);
/// @}
/// @name Lexical Observers
@@ -298,8 +303,8 @@ std::string convert_to_slash(StringRef path, Style style = Style::native);
///
/// @param path Input path.
/// @result The root name of \a path if it has one, otherwise "".
-StringRef root_name(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef root_name(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get root directory.
///
@@ -312,8 +317,8 @@ StringRef root_name(StringRef path LLVM_LIFETIME_BOUND,
/// @param path Input path.
/// @result The root directory of \a path if it has one, otherwise
/// "".
-StringRef root_directory(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef root_directory(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get root path.
///
@@ -321,8 +326,8 @@ StringRef root_directory(StringRef path LLVM_LIFETIME_BOUND,
///
/// @param path Input path.
/// @result The root path of \a path if it has one, otherwise "".
-StringRef root_path(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef root_path(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get relative path.
///
@@ -334,8 +339,8 @@ StringRef root_path(StringRef path LLVM_LIFETIME_BOUND,
///
/// @param path Input path.
/// @result The path starting after root_path if one exists, otherwise "".
-StringRef relative_path(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef relative_path(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get parent path.
///
@@ -347,8 +352,8 @@ StringRef relative_path(StringRef path LLVM_LIFETIME_BOUND,
///
/// @param path Input path.
/// @result The parent path of \a path if one exists, otherwise "".
-StringRef parent_path(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef parent_path(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get filename.
///
@@ -362,8 +367,8 @@ StringRef parent_path(StringRef path LLVM_LIFETIME_BOUND,
/// @param path Input path.
/// @result The filename part of \a path. This is defined as the last component
/// of \a path. Similar to the POSIX "basename" utility.
-StringRef filename(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef filename(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get stem.
///
@@ -381,7 +386,8 @@ StringRef filename(StringRef path LLVM_LIFETIME_BOUND,
///
/// @param path Input path.
/// @result The stem of \a path.
-StringRef stem(StringRef path LLVM_LIFETIME_BOUND, Style style = Style::native);
+LLVM_ABI StringRef stem(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Get extension.
///
@@ -397,19 +403,19 @@ StringRef stem(StringRef path LLVM_LIFETIME_BOUND, Style style = Style::native);
///
/// @param path Input path.
/// @result The extension of \a path.
-StringRef extension(StringRef path LLVM_LIFETIME_BOUND,
- Style style = Style::native);
+LLVM_ABI StringRef extension(StringRef path LLVM_LIFETIME_BOUND,
+ Style style = Style::native);
/// Check whether the given char is a path separator on the host OS.
///
/// @param value a character
/// @result true if \a value is a path separator character on the host OS
-bool is_separator(char value, Style style = Style::native);
+LLVM_ABI bool is_separator(char value, Style style = Style::native);
/// Return the preferred separator for this platform.
///
/// @result StringRef of the preferred separator, null-terminated.
-StringRef get_separator(Style style = Style::native);
+LLVM_ABI StringRef get_separator(Style style = Style::native);
/// Get the typical temporary directory for the system, e.g.,
/// "/var/tmp" or "C:/TEMP"
@@ -420,27 +426,28 @@ StringRef get_separator(Style style = Style::native);
/// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory.
///
/// @param result Holds the resulting path name.
-void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);
+LLVM_ABI void system_temp_directory(bool erasedOnReboot,
+ SmallVectorImpl<char> &result);
/// Get the user's home directory.
///
/// @param result Holds the resulting path name.
/// @result True if a home directory is set, false otherwise.
-bool home_directory(SmallVectorImpl<char> &result);
+LLVM_ABI bool home_directory(SmallVectorImpl<char> &result);
/// Get the directory where packages should read user-specific configurations.
/// e.g. $XDG_CONFIG_HOME.
///
/// @param result Holds the resulting path name.
/// @result True if the appropriate path was determined, it need not exist.
-bool user_config_directory(SmallVectorImpl<char> &result);
+LLVM_ABI bool user_config_directory(SmallVectorImpl<char> &result);
/// Get the directory where installed packages should put their
/// machine-local cache, e.g. $XDG_CACHE_HOME.
///
/// @param result Holds the resulting path name.
/// @result True if the appropriate path was determined, it need not exist.
-bool cache_directory(SmallVectorImpl<char> &result);
+LLVM_ABI bool cache_directory(SmallVectorImpl<char> &result);
/// Has root name?
///
@@ -448,7 +455,7 @@ bool cache_directory(SmallVectorImpl<char> &result);
///
/// @param path Input path.
/// @result True if the path has a root name, false otherwise.
-bool has_root_name(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_root_name(const Twine &path, Style style = Style::native);
/// Has root directory?
///
@@ -456,7 +463,8 @@ bool has_root_name(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a root directory, false otherwise.
-bool has_root_directory(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_root_directory(const Twine &path,
+ Style style = Style::native);
/// Has root path?
///
@@ -464,7 +472,7 @@ bool has_root_directory(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a root path, false otherwise.
-bool has_root_path(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_root_path(const Twine &path, Style style = Style::native);
/// Has relative path?
///
@@ -472,7 +480,7 @@ bool has_root_path(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a relative path, false otherwise.
-bool has_relative_path(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_relative_path(const Twine &path, Style style = Style::native);
/// Has parent path?
///
@@ -480,7 +488,7 @@ bool has_relative_path(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a parent path, false otherwise.
-bool has_parent_path(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_parent_path(const Twine &path, Style style = Style::native);
/// Has filename?
///
@@ -488,7 +496,7 @@ bool has_parent_path(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a filename, false otherwise.
-bool has_filename(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_filename(const Twine &path, Style style = Style::native);
/// Has stem?
///
@@ -496,7 +504,7 @@ bool has_filename(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a stem, false otherwise.
-bool has_stem(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_stem(const Twine &path, Style style = Style::native);
/// Has extension?
///
@@ -504,7 +512,7 @@ bool has_stem(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path has a extension, false otherwise.
-bool has_extension(const Twine &path, Style style = Style::native);
+LLVM_ABI bool has_extension(const Twine &path, Style style = Style::native);
/// Is path absolute?
///
@@ -523,7 +531,7 @@ bool has_extension(const Twine &path, Style style = Style::native);
///
/// @param path Input path.
/// @result True if the path is absolute, false if it is not.
-bool is_absolute(const Twine &path, Style style = Style::native);
+LLVM_ABI bool is_absolute(const Twine &path, Style style = Style::native);
/// Is path absolute using GNU rules?
///
@@ -548,13 +556,13 @@ bool is_absolute(const Twine &path, Style style = Style::native);
/// means to derive the style from the host.
/// @result True if the path is absolute following GNU rules, false if it is
/// not.
-bool is_absolute_gnu(const Twine &path, Style style = Style::native);
+LLVM_ABI bool is_absolute_gnu(const Twine &path, Style style = Style::native);
/// Is path relative?
///
/// @param path Input path.
/// @result True if the path is relative, false if it is not.
-bool is_relative(const Twine &path, Style style = Style::native);
+LLVM_ABI bool is_relative(const Twine &path, Style style = Style::native);
} // end namespace path
} // end namespace sys
diff --git a/llvm/include/llvm/Support/PluginLoader.h b/llvm/include/llvm/Support/PluginLoader.h
index bdd3636..1badf41 100644
--- a/llvm/include/llvm/Support/PluginLoader.h
+++ b/llvm/include/llvm/Support/PluginLoader.h
@@ -16,6 +16,8 @@
#ifndef LLVM_SUPPORT_PLUGINLOADER_H
#define LLVM_SUPPORT_PLUGINLOADER_H
+#include "llvm/Support/Compiler.h"
+
#ifndef DONT_GET_PLUGIN_LOADER_OPTION
#include "llvm/Support/CommandLine.h"
#endif
@@ -24,9 +26,9 @@
namespace llvm {
struct PluginLoader {
- void operator=(const std::string &Filename);
- static unsigned getNumPlugins();
- static std::string& getPlugin(unsigned num);
+ LLVM_ABI void operator=(const std::string &Filename);
+ LLVM_ABI static unsigned getNumPlugins();
+ LLVM_ABI static std::string &getPlugin(unsigned num);
};
#ifndef DONT_GET_PLUGIN_LOADER_OPTION
diff --git a/llvm/include/llvm/Support/PrettyStackTrace.h b/llvm/include/llvm/Support/PrettyStackTrace.h
index ac25cff..33d95f1 100644
--- a/llvm/include/llvm/Support/PrettyStackTrace.h
+++ b/llvm/include/llvm/Support/PrettyStackTrace.h
@@ -24,7 +24,7 @@ namespace llvm {
/// Enables dumping a "pretty" stack trace when the program crashes.
///
/// \see PrettyStackTraceEntry
- void EnablePrettyStackTrace();
+ LLVM_ABI void EnablePrettyStackTrace();
/// Enables (or disables) dumping a "pretty" stack trace when the user sends
/// SIGINFO or SIGUSR1 to the current process.
@@ -35,22 +35,24 @@ namespace llvm {
///
/// \see EnablePrettyStackTrace
/// \see PrettyStackTraceEntry
- void EnablePrettyStackTraceOnSigInfoForThisThread(bool ShouldEnable = true);
+ LLVM_ABI void
+ EnablePrettyStackTraceOnSigInfoForThisThread(bool ShouldEnable = true);
/// Replaces the generic bug report message that is output upon
/// a crash.
- void setBugReportMsg(const char *Msg);
+ LLVM_ABI void setBugReportMsg(const char *Msg);
/// Get the bug report message that will be output upon a crash.
- const char *getBugReportMsg();
+ LLVM_ABI const char *getBugReportMsg();
/// PrettyStackTraceEntry - This class is used to represent a frame of the
/// "pretty" stack trace that is dumped when a program crashes. You can define
/// subclasses of this and declare them on the program stack: when they are
/// constructed and destructed, they will add their symbolic frames to a
/// virtual stack trace. This gets dumped out if the program crashes.
- class PrettyStackTraceEntry {
- friend PrettyStackTraceEntry *ReverseStackTrace(PrettyStackTraceEntry *);
+ class LLVM_ABI PrettyStackTraceEntry {
+ LLVM_ABI_FRIEND friend PrettyStackTraceEntry *
+ ReverseStackTrace(PrettyStackTraceEntry *);
PrettyStackTraceEntry *NextEntry;
PrettyStackTraceEntry(const PrettyStackTraceEntry &) = delete;
@@ -69,7 +71,7 @@ namespace llvm {
/// PrettyStackTraceString - This object prints a specified string (which
/// should not contain newlines) to the stream as the stack trace when a crash
/// occurs.
- class PrettyStackTraceString : public PrettyStackTraceEntry {
+ class LLVM_ABI PrettyStackTraceString : public PrettyStackTraceEntry {
const char *Str;
public:
PrettyStackTraceString(const char *str) : Str(str) {}
@@ -79,7 +81,7 @@ namespace llvm {
/// PrettyStackTraceFormat - This object prints a string (which may use
/// printf-style formatting but should not contain newlines) to the stream
/// as the stack trace when a crash occurs.
- class PrettyStackTraceFormat : public PrettyStackTraceEntry {
+ class LLVM_ABI PrettyStackTraceFormat : public PrettyStackTraceEntry {
llvm::SmallVector<char, 32> Str;
public:
PrettyStackTraceFormat(const char *Format, ...);
@@ -88,7 +90,7 @@ namespace llvm {
/// PrettyStackTraceProgram - This object prints a specified program arguments
/// to the stream as the stack trace when a crash occurs.
- class PrettyStackTraceProgram : public PrettyStackTraceEntry {
+ class LLVM_ABI PrettyStackTraceProgram : public PrettyStackTraceEntry {
int ArgC;
const char *const *ArgV;
public:
@@ -100,7 +102,7 @@ namespace llvm {
};
/// Returns the topmost element of the "pretty" stack state.
- const void *SavePrettyStackState();
+ LLVM_ABI const void *SavePrettyStackState();
/// Restores the topmost element of the "pretty" stack state to State, which
/// should come from a previous call to SavePrettyStackState(). This is
@@ -109,7 +111,7 @@ namespace llvm {
/// happens after a crash that's been recovered by CrashRecoveryContext
/// doesn't have frames on it that were added in code unwound by the
/// CrashRecoveryContext.
- void RestorePrettyStackState(const void *State);
+ LLVM_ABI void RestorePrettyStackState(const void *State);
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/Process.h b/llvm/include/llvm/Support/Process.h
index 83f1fcd..f644fe7 100644
--- a/llvm/include/llvm/Support/Process.h
+++ b/llvm/include/llvm/Support/Process.h
@@ -25,6 +25,7 @@
#define LLVM_SUPPORT_PROCESS_H
#include "llvm/Support/Chrono.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Program.h"
@@ -45,14 +46,14 @@ public:
using Pid = int32_t;
/// Get the process's identifier.
- static Pid getProcessId();
+ LLVM_ABI static Pid getProcessId();
/// Get the process's page size.
/// This may fail if the underlying syscall returns an error. In most cases,
/// page size information is used for optimization, and this error can be
/// safely discarded by calling consumeError, and an estimated page size
/// substituted instead.
- static Expected<unsigned> getPageSize();
+ LLVM_ABI static Expected<unsigned> getPageSize();
/// Get the process's estimated page size.
/// This function always succeeds, but if the underlying syscall to determine
@@ -72,7 +73,7 @@ public:
/// by the process. This only counts the memory allocated via the malloc,
/// calloc and realloc functions and includes any "free" holes in the
/// allocated space.
- static size_t GetMallocUsage();
+ LLVM_ABI static size_t GetMallocUsage();
/// This static function will set \p user_time to the amount of CPU time
/// spent in user (non-kernel) mode and \p sys_time to the amount of CPU
@@ -82,22 +83,22 @@ public:
/// \param elapsed Returns the system_clock::now() giving current time
/// \param user_time Returns the current amount of user time for the process
/// \param sys_time Returns the current amount of system time for the process
- static void GetTimeUsage(TimePoint<> &elapsed,
- std::chrono::nanoseconds &user_time,
- std::chrono::nanoseconds &sys_time);
+ LLVM_ABI static void GetTimeUsage(TimePoint<> &elapsed,
+ std::chrono::nanoseconds &user_time,
+ std::chrono::nanoseconds &sys_time);
/// This function makes the necessary calls to the operating system to
/// prevent core files or any other kind of large memory dumps that can
/// occur when a program fails.
/// Prevent core file generation.
- static void PreventCoreFiles();
+ LLVM_ABI static void PreventCoreFiles();
/// true if PreventCoreFiles has been called, false otherwise.
- static bool AreCoreFilesPrevented();
+ LLVM_ABI static bool AreCoreFilesPrevented();
// This function returns the environment variable \arg name's value as a UTF-8
// string. \arg Name is assumed to be in UTF-8 encoding too.
- static std::optional<std::string> GetEnv(StringRef name);
+ LLVM_ABI static std::optional<std::string> GetEnv(StringRef name);
/// This function searches for an existing file in the list of directories
/// in a PATH like environment variable, and returns the first file found,
@@ -105,12 +106,12 @@ public:
/// variable. If an ignore list is specified, then any folder which is in
/// the PATH like environment variable but is also in IgnoreList is not
/// considered.
- static std::optional<std::string>
+ LLVM_ABI static std::optional<std::string>
FindInEnvPath(StringRef EnvName, StringRef FileName,
ArrayRef<std::string> IgnoreList,
char Separator = EnvPathSeparator);
- static std::optional<std::string>
+ LLVM_ABI static std::optional<std::string>
FindInEnvPath(StringRef EnvName, StringRef FileName,
char Separator = EnvPathSeparator);
@@ -118,7 +119,7 @@ public:
// and error) are properly mapped to a file descriptor before we use any of
// them. This should only be called by standalone programs, library
// components should not call this.
- static std::error_code FixupStandardFileDescriptors();
+ LLVM_ABI static std::error_code FixupStandardFileDescriptors();
// This function safely closes a file descriptor. It is not safe to retry
// close(2) when it returns with errno equivalent to EINTR; this is because
@@ -127,93 +128,93 @@ public:
//
// N.B. Some operating systems, due to thread cancellation, cannot properly
// guarantee that it will or will not be closed one way or the other!
- static std::error_code SafelyCloseFileDescriptor(int FD);
+ LLVM_ABI static std::error_code SafelyCloseFileDescriptor(int FD);
/// This function determines if the standard input is connected directly
/// to a user's input (keyboard probably), rather than coming from a file
/// or pipe.
- static bool StandardInIsUserInput();
+ LLVM_ABI static bool StandardInIsUserInput();
/// This function determines if the standard output is connected to a
/// "tty" or "console" window. That is, the output would be displayed to
/// the user rather than being put on a pipe or stored in a file.
- static bool StandardOutIsDisplayed();
+ LLVM_ABI static bool StandardOutIsDisplayed();
/// This function determines if the standard error is connected to a
/// "tty" or "console" window. That is, the output would be displayed to
/// the user rather than being put on a pipe or stored in a file.
- static bool StandardErrIsDisplayed();
+ LLVM_ABI static bool StandardErrIsDisplayed();
/// This function determines if the given file descriptor is connected to
/// a "tty" or "console" window. That is, the output would be displayed to
/// the user rather than being put on a pipe or stored in a file.
- static bool FileDescriptorIsDisplayed(int fd);
+ LLVM_ABI static bool FileDescriptorIsDisplayed(int fd);
/// This function determines if the given file descriptor is displayd and
/// supports colors.
- static bool FileDescriptorHasColors(int fd);
+ LLVM_ABI static bool FileDescriptorHasColors(int fd);
/// This function determines the number of columns in the window
/// if standard output is connected to a "tty" or "console"
/// window. If standard output is not connected to a tty or
/// console, or if the number of columns cannot be determined,
/// this routine returns zero.
- static unsigned StandardOutColumns();
+ LLVM_ABI static unsigned StandardOutColumns();
/// This function determines the number of columns in the window
/// if standard error is connected to a "tty" or "console"
/// window. If standard error is not connected to a tty or
/// console, or if the number of columns cannot be determined,
/// this routine returns zero.
- static unsigned StandardErrColumns();
+ LLVM_ABI static unsigned StandardErrColumns();
/// This function determines whether the terminal connected to standard
/// output supports colors. If standard output is not connected to a
/// terminal, this function returns false.
- static bool StandardOutHasColors();
+ LLVM_ABI static bool StandardOutHasColors();
/// This function determines whether the terminal connected to standard
/// error supports colors. If standard error is not connected to a
/// terminal, this function returns false.
- static bool StandardErrHasColors();
+ LLVM_ABI static bool StandardErrHasColors();
/// Enables or disables whether ANSI escape sequences are used to output
/// colors. This only has an effect on Windows.
/// Note: Setting this option is not thread-safe and should only be done
/// during initialization.
- static void UseANSIEscapeCodes(bool enable);
+ LLVM_ABI static void UseANSIEscapeCodes(bool enable);
/// Whether changing colors requires the output to be flushed.
/// This is needed on systems that don't support escape sequences for
/// changing colors.
- static bool ColorNeedsFlush();
+ LLVM_ABI static bool ColorNeedsFlush();
/// This function returns the colorcode escape sequences.
/// If ColorNeedsFlush() is true then this function will change the colors
/// and return an empty escape sequence. In that case it is the
/// responsibility of the client to flush the output stream prior to
/// calling this function.
- static const char *OutputColor(char c, bool bold, bool bg);
+ LLVM_ABI static const char *OutputColor(char c, bool bold, bool bg);
/// Same as OutputColor, but only enables the bold attribute.
- static const char *OutputBold(bool bg);
+ LLVM_ABI static const char *OutputBold(bool bg);
/// This function returns the escape sequence to reverse forground and
/// background colors.
- static const char *OutputReverse();
+ LLVM_ABI static const char *OutputReverse();
/// Resets the terminals colors, or returns an escape sequence to do so.
- static const char *ResetColor();
+ LLVM_ABI static const char *ResetColor();
/// Get the result of a process wide random number generator. The
/// generator will be automatically seeded in non-deterministic fashion.
- static unsigned GetRandomNumber();
+ LLVM_ABI static unsigned GetRandomNumber();
/// Equivalent to ::exit(), except when running inside a CrashRecoveryContext.
/// In that case, the control flow will resume after RunSafely(), like for a
/// crash, rather than exiting the current process.
/// Use \arg NoCleanup for calling _exit() instead of exit().
- [[noreturn]] static void Exit(int RetCode, bool NoCleanup = false);
+ [[noreturn]] LLVM_ABI static void Exit(int RetCode, bool NoCleanup = false);
private:
[[noreturn]] static void ExitNoCleanup(int RetCode);
diff --git a/llvm/include/llvm/Support/Program.h b/llvm/include/llvm/Support/Program.h
index 7ef532d..da5685d 100644
--- a/llvm/include/llvm/Support/Program.h
+++ b/llvm/include/llvm/Support/Program.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include <chrono>
@@ -52,7 +53,7 @@ struct ProcessInfo {
/// The return code, set after execution.
int ReturnCode;
- ProcessInfo();
+ LLVM_ABI ProcessInfo();
};
/// This struct encapsulates information about a process execution.
@@ -75,20 +76,20 @@ struct ProcessStatistics {
///
/// \returns The fully qualified path to the first \p Name in \p Paths if it
/// exists. \p Name if \p Name has slashes in it. Otherwise an error.
-ErrorOr<std::string> findProgramByName(StringRef Name,
- ArrayRef<StringRef> Paths = {});
+LLVM_ABI ErrorOr<std::string> findProgramByName(StringRef Name,
+ ArrayRef<StringRef> Paths = {});
// These functions change the specified standard stream (stdin or stdout) mode
// based on the Flags. They return errc::success if the specified stream was
// changed. Otherwise, a platform dependent error is returned.
-std::error_code ChangeStdinMode(fs::OpenFlags Flags);
-std::error_code ChangeStdoutMode(fs::OpenFlags Flags);
+LLVM_ABI std::error_code ChangeStdinMode(fs::OpenFlags Flags);
+LLVM_ABI std::error_code ChangeStdoutMode(fs::OpenFlags Flags);
// These functions change the specified standard stream (stdin or stdout) to
// binary mode. They return errc::success if the specified stream
// was changed. Otherwise a platform dependent error is returned.
-std::error_code ChangeStdinToBinary();
-std::error_code ChangeStdoutToBinary();
+LLVM_ABI std::error_code ChangeStdinToBinary();
+LLVM_ABI std::error_code ChangeStdoutToBinary();
/// This function executes the program using the arguments provided. The
/// invoked program will inherit the stdin, stdout, and stderr file
@@ -101,7 +102,7 @@ std::error_code ChangeStdoutToBinary();
/// A zero or positive value indicates the result code of the program.
/// -1 indicates failure to execute
/// -2 indicates a crash during execution or timeout
-int ExecuteAndWait(
+LLVM_ABI int ExecuteAndWait(
StringRef Program, ///< Path of the program to be executed. It is
///< presumed this is the result of the findProgramByName method.
ArrayRef<StringRef> Args, ///< An array of strings that are passed to the
@@ -146,7 +147,7 @@ int ExecuteAndWait(
/// \note On Microsoft Windows systems, users will need to either call
/// \ref Wait until the process has finished executing or win32's CloseHandle
/// API on ProcessInfo.ProcessHandle to avoid memory leaks.
-ProcessInfo ExecuteNoWait(
+LLVM_ABI ProcessInfo ExecuteNoWait(
StringRef Program, ArrayRef<StringRef> Args,
std::optional<ArrayRef<StringRef>> Env,
ArrayRef<std::optional<StringRef>> Redirects = {}, unsigned MemoryLimit = 0,
@@ -159,13 +160,13 @@ ProcessInfo ExecuteNoWait(
/// Return true if the given arguments fit within system-specific
/// argument length limits.
-bool commandLineFitsWithinSystemLimits(StringRef Program,
- ArrayRef<StringRef> Args);
+LLVM_ABI bool commandLineFitsWithinSystemLimits(StringRef Program,
+ ArrayRef<StringRef> Args);
/// Return true if the given arguments fit within system-specific
/// argument length limits.
-bool commandLineFitsWithinSystemLimits(StringRef Program,
- ArrayRef<const char *> Args);
+LLVM_ABI bool commandLineFitsWithinSystemLimits(StringRef Program,
+ ArrayRef<const char *> Args);
/// File encoding options when writing contents that a non-UTF8 tool will
/// read (on Windows systems). For UNIX, we always use UTF-8.
@@ -197,7 +198,7 @@ enum WindowsEncodingMethod {
/// should be changed as soon as binutils fix this to support UTF16 on mingw.
///
/// \returns non-zero error_code if failed
-std::error_code
+LLVM_ABI std::error_code
writeFileWithEncoding(StringRef FileName, StringRef Contents,
WindowsEncodingMethod Encoding = WEM_UTF8);
@@ -208,38 +209,39 @@ writeFileWithEncoding(StringRef FileName, StringRef Contents,
/// \li 0 if the child process has not changed state.
/// \note Users of this function should always check the ReturnCode member of
/// the \see ProcessInfo returned from this function.
-ProcessInfo
-Wait(const ProcessInfo &PI, ///< The child process that should be waited on.
- std::optional<unsigned> SecondsToWait, ///< If std::nullopt, waits until
- ///< child has terminated.
- ///< If a value, this specifies the amount of time to wait for the child
- ///< process. If the time expires, and \p Polling is false, the child is
- ///< killed and this < function returns. If the time expires and \p
- ///< Polling is true, the child is resumed.
- ///<
- ///< If zero, this function will perform a non-blocking
- ///< wait on the child process.
- std::string *ErrMsg = nullptr, ///< If non-zero, provides a pointer to a
- ///< string instance in which error messages will be returned. If the
- ///< string is non-empty upon return an error occurred while invoking the
- ///< program.
- std::optional<ProcessStatistics> *ProcStat =
- nullptr, ///< If non-zero, provides
- /// a pointer to a structure in which process execution statistics will
- /// be stored.
-
- bool Polling = false ///< If true, do not kill the process on timeout.
+LLVM_ABI ProcessInfo Wait(
+ const ProcessInfo &PI, ///< The child process that should be waited on.
+ std::optional<unsigned> SecondsToWait, ///< If std::nullopt, waits until
+ ///< child has terminated.
+ ///< If a value, this specifies the amount of time to wait for the child
+ ///< process. If the time expires, and \p Polling is false, the child is
+ ///< killed and this < function returns. If the time expires and \p
+ ///< Polling is true, the child is resumed.
+ ///<
+ ///< If zero, this function will perform a non-blocking
+ ///< wait on the child process.
+ std::string *ErrMsg = nullptr, ///< If non-zero, provides a pointer to a
+ ///< string instance in which error messages will be returned. If the
+ ///< string is non-empty upon return an error occurred while invoking the
+ ///< program.
+ std::optional<ProcessStatistics> *ProcStat =
+ nullptr, ///< If non-zero, provides
+ /// a pointer to a structure in which process execution statistics will
+ /// be stored.
+
+ bool Polling = false ///< If true, do not kill the process on timeout.
);
/// Print a command argument, and optionally quote it.
-void printArg(llvm::raw_ostream &OS, StringRef Arg, bool Quote);
+LLVM_ABI void printArg(llvm::raw_ostream &OS, StringRef Arg, bool Quote);
#if defined(_WIN32)
/// Given a list of command line arguments, quote and escape them as necessary
/// to build a single flat command line appropriate for calling CreateProcess
/// on
/// Windows.
-ErrorOr<std::wstring> flattenWindowsCommandLine(ArrayRef<StringRef> Args);
+LLVM_ABI ErrorOr<std::wstring>
+flattenWindowsCommandLine(ArrayRef<StringRef> Args);
#endif
} // namespace sys
} // namespace llvm
diff --git a/llvm/include/llvm/Support/RISCVAttributeParser.h b/llvm/include/llvm/Support/RISCVAttributeParser.h
index 4a74ed32..cc11f4b 100644
--- a/llvm/include/llvm/Support/RISCVAttributeParser.h
+++ b/llvm/include/llvm/Support/RISCVAttributeParser.h
@@ -9,11 +9,12 @@
#ifndef LLVM_SUPPORT_RISCVATTRIBUTEPARSER_H
#define LLVM_SUPPORT_RISCVATTRIBUTEPARSER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttrParserCompact.h"
#include "llvm/Support/RISCVAttributes.h"
namespace llvm {
-class RISCVAttributeParser : public ELFCompactAttrParser {
+class LLVM_ABI RISCVAttributeParser : public ELFCompactAttrParser {
struct DisplayHandler {
RISCVAttrs::AttrType attribute;
Error (RISCVAttributeParser::*routine)(unsigned);
diff --git a/llvm/include/llvm/Support/RISCVAttributes.h b/llvm/include/llvm/Support/RISCVAttributes.h
index 07476e8..3388c52 100644
--- a/llvm/include/llvm/Support/RISCVAttributes.h
+++ b/llvm/include/llvm/Support/RISCVAttributes.h
@@ -17,12 +17,13 @@
#ifndef LLVM_SUPPORT_RISCVATTRIBUTES_H
#define LLVM_SUPPORT_RISCVATTRIBUTES_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ELFAttributes.h"
namespace llvm {
namespace RISCVAttrs {
-const TagNameMap &getRISCVAttributeTags();
+LLVM_ABI const TagNameMap &getRISCVAttributeTags();
enum AttrType : unsigned {
// Attribute types in ELF/.riscv.attributes.
diff --git a/llvm/include/llvm/Support/RISCVISAUtils.h b/llvm/include/llvm/Support/RISCVISAUtils.h
index 77f8c3e..165bb08 100644
--- a/llvm/include/llvm/Support/RISCVISAUtils.h
+++ b/llvm/include/llvm/Support/RISCVISAUtils.h
@@ -14,6 +14,7 @@
#define LLVM_SUPPORT_RISCVISAUTILS_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include <map>
#include <string>
@@ -28,7 +29,7 @@ struct ExtensionVersion {
unsigned Minor;
};
-bool compareExtension(const std::string &LHS, const std::string &RHS);
+LLVM_ABI bool compareExtension(const std::string &LHS, const std::string &RHS);
/// Helper class for OrderedExtensionMap.
struct ExtensionComparator {
diff --git a/llvm/include/llvm/Support/RandomNumberGenerator.h b/llvm/include/llvm/Support/RandomNumberGenerator.h
index 55d6876..e1a11e2 100644
--- a/llvm/include/llvm/Support/RandomNumberGenerator.h
+++ b/llvm/include/llvm/Support/RandomNumberGenerator.h
@@ -41,7 +41,7 @@ public:
using result_type = generator_type::result_type;
/// Returns a random number in the range [0, Max).
- result_type operator()();
+ LLVM_ABI result_type operator()();
static constexpr result_type min() { return generator_type::min(); }
static constexpr result_type max() { return generator_type::max(); }
@@ -63,7 +63,7 @@ private:
};
// Get random vector of specified size
-std::error_code getRandomBytes(void *Buffer, size_t Size);
+LLVM_ABI std::error_code getRandomBytes(void *Buffer, size_t Size);
}
#endif
diff --git a/llvm/include/llvm/Support/Recycler.h b/llvm/include/llvm/Support/Recycler.h
index e531e23..b51c586 100644
--- a/llvm/include/llvm/Support/Recycler.h
+++ b/llvm/include/llvm/Support/Recycler.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/ilist.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
@@ -24,7 +25,8 @@ namespace llvm {
/// PrintRecyclingAllocatorStats - Helper for RecyclingAllocator for
/// printing statistics.
///
-void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);
+LLVM_ABI void PrintRecyclerStats(size_t Size, size_t Align,
+ size_t FreeListSize);
/// Recycler - This class manages a linked-list of deallocated nodes
/// and facilitates reusing deallocated memory in place of allocating
diff --git a/llvm/include/llvm/Support/Regex.h b/llvm/include/llvm/Support/Regex.h
index bb7a800..98404c328 100644
--- a/llvm/include/llvm/Support/Regex.h
+++ b/llvm/include/llvm/Support/Regex.h
@@ -17,6 +17,7 @@
#define LLVM_SUPPORT_REGEX_H
#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/Support/Compiler.h"
#include <string>
struct llvm_regex;
@@ -45,31 +46,31 @@ namespace llvm {
LLVM_MARK_AS_BITMASK_ENUM(BasicRegex)
};
- Regex();
+ LLVM_ABI Regex();
/// Compiles the given regular expression \p Regex.
///
/// \param Regex - referenced string is no longer needed after this
/// constructor does finish. Only its compiled form is kept stored.
- Regex(StringRef Regex, RegexFlags Flags = NoFlags);
- Regex(StringRef Regex, unsigned Flags);
+ LLVM_ABI Regex(StringRef Regex, RegexFlags Flags = NoFlags);
+ LLVM_ABI Regex(StringRef Regex, unsigned Flags);
Regex(const Regex &) = delete;
Regex &operator=(Regex regex) {
std::swap(preg, regex.preg);
std::swap(error, regex.error);
return *this;
}
- Regex(Regex &&regex);
- ~Regex();
+ LLVM_ABI Regex(Regex &&regex);
+ LLVM_ABI ~Regex();
/// isValid - returns the error encountered during regex compilation, if
/// any.
- bool isValid(std::string &Error) const;
+ LLVM_ABI bool isValid(std::string &Error) const;
bool isValid() const { return !error; }
/// getNumMatches - In a valid regex, return the number of parenthesized
/// matches it contains. The number filled in by match will include this
/// many entries plus one for the whole regex (as element 0).
- unsigned getNumMatches() const;
+ LLVM_ABI unsigned getNumMatches() const;
/// matches - Match the regex against a given \p String.
///
@@ -81,8 +82,9 @@ namespace llvm {
/// as a non-empty string. If there is no error, it will be an empty string.
///
/// This returns true on a successful match.
- bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = nullptr,
- std::string *Error = nullptr) const;
+ LLVM_ABI bool match(StringRef String,
+ SmallVectorImpl<StringRef> *Matches = nullptr,
+ std::string *Error = nullptr) const;
/// sub - Return the result of replacing the first match of the regex in
/// \p String with the \p Repl string. Backreferences like "\0" and "\g<1>"
@@ -95,15 +97,15 @@ namespace llvm {
/// \param Error If non-null, any errors in the substitution (invalid
/// backreferences, trailing backslashes) will be recorded as a non-empty
/// string. If there is no error, it will be an empty string.
- std::string sub(StringRef Repl, StringRef String,
- std::string *Error = nullptr) const;
+ LLVM_ABI std::string sub(StringRef Repl, StringRef String,
+ std::string *Error = nullptr) const;
/// If this function returns true, ^Str$ is an extended regular
/// expression that matches Str and only Str.
- static bool isLiteralERE(StringRef Str);
+ LLVM_ABI static bool isLiteralERE(StringRef Str);
/// Turn String into a regex by escaping its special characters.
- static std::string escape(StringRef String);
+ LLVM_ABI static std::string escape(StringRef String);
private:
struct llvm_regex *preg;
diff --git a/llvm/include/llvm/Support/SHA1.h b/llvm/include/llvm/Support/SHA1.h
index ae6d62a..93d14a3 100644
--- a/llvm/include/llvm/Support/SHA1.h
+++ b/llvm/include/llvm/Support/SHA1.h
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_SHA1_H
#define LLVM_SUPPORT_SHA1_H
+#include "llvm/Support/Compiler.h"
#include <array>
#include <cstdint>
@@ -28,28 +29,28 @@ public:
SHA1() { init(); }
/// Reinitialize the internal state
- void init();
+ LLVM_ABI void init();
/// Digest more data.
- void update(ArrayRef<uint8_t> Data);
+ LLVM_ABI void update(ArrayRef<uint8_t> Data);
/// Digest more data.
- void update(StringRef Str);
+ LLVM_ABI void update(StringRef Str);
/// Return the current raw 160-bits SHA1 for the digested data
/// since the last call to init(). This call will add data to the internal
/// state and as such is not suited for getting an intermediate result
/// (see result()).
- std::array<uint8_t, 20> final();
+ LLVM_ABI std::array<uint8_t, 20> final();
/// Return the current raw 160-bits SHA1 for the digested data
/// since the last call to init(). This is suitable for getting the SHA1 at
/// any time without invalidating the internal state so that more calls can be
/// made into update.
- std::array<uint8_t, 20> result();
+ LLVM_ABI std::array<uint8_t, 20> result();
/// Returns a raw 160-bit SHA1 hash for the given data.
- static std::array<uint8_t, 20> hash(ArrayRef<uint8_t> Data);
+ LLVM_ABI static std::array<uint8_t, 20> hash(ArrayRef<uint8_t> Data);
private:
/// Define some constants.
diff --git a/llvm/include/llvm/Support/SHA256.h b/llvm/include/llvm/Support/SHA256.h
index 68b32c7..2704db1 100644
--- a/llvm/include/llvm/Support/SHA256.h
+++ b/llvm/include/llvm/Support/SHA256.h
@@ -22,6 +22,7 @@
#ifndef LLVM_SUPPORT_SHA256_H
#define LLVM_SUPPORT_SHA256_H
+#include "llvm/Support/Compiler.h"
#include <array>
#include <cstdint>
@@ -35,28 +36,28 @@ public:
explicit SHA256() { init(); }
/// Reinitialize the internal state
- void init();
+ LLVM_ABI void init();
/// Digest more data.
- void update(ArrayRef<uint8_t> Data);
+ LLVM_ABI void update(ArrayRef<uint8_t> Data);
/// Digest more data.
- void update(StringRef Str);
+ LLVM_ABI void update(StringRef Str);
/// Return the current raw 256-bits SHA256 for the digested
/// data since the last call to init(). This call will add data to the
/// internal state and as such is not suited for getting an intermediate
/// result (see result()).
- std::array<uint8_t, 32> final();
+ LLVM_ABI std::array<uint8_t, 32> final();
/// Return the current raw 256-bits SHA256 for the digested
/// data since the last call to init(). This is suitable for getting the
/// SHA256 at any time without invalidating the internal state so that more
/// calls can be made into update.
- std::array<uint8_t, 32> result();
+ LLVM_ABI std::array<uint8_t, 32> result();
/// Returns a raw 256-bit SHA256 hash for the given data.
- static std::array<uint8_t, 32> hash(ArrayRef<uint8_t> Data);
+ LLVM_ABI static std::array<uint8_t, 32> hash(ArrayRef<uint8_t> Data);
private:
/// Define some constants.
diff --git a/llvm/include/llvm/Support/SMTAPI.h b/llvm/include/llvm/Support/SMTAPI.h
index a2a8967..f1bb86c 100644
--- a/llvm/include/llvm/Support/SMTAPI.h
+++ b/llvm/include/llvm/Support/SMTAPI.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -70,7 +71,7 @@ public:
virtual void print(raw_ostream &OS) const = 0;
- LLVM_DUMP_METHOD void dump() const;
+ LLVM_ABI LLVM_DUMP_METHOD void dump() const;
protected:
/// Query the SMT solver and returns true if two sorts are equal (same kind
@@ -117,7 +118,7 @@ public:
virtual void print(raw_ostream &OS) const = 0;
- LLVM_DUMP_METHOD void dump() const;
+ LLVM_ABI LLVM_DUMP_METHOD void dump() const;
protected:
/// Query the SMT solver and returns true if two sorts are equal (same kind
@@ -135,7 +136,7 @@ public:
virtual void print(raw_ostream &OS) const = 0;
- LLVM_DUMP_METHOD void dump() const;
+ LLVM_ABI LLVM_DUMP_METHOD void dump() const;
};
/// Shared pointer for SMTExprs, used by SMTSolver API.
@@ -151,7 +152,7 @@ public:
SMTSolver() = default;
virtual ~SMTSolver() = default;
- LLVM_DUMP_METHOD void dump() const;
+ LLVM_ABI LLVM_DUMP_METHOD void dump() const;
// Returns an appropriate floating-point sort for the given bitwidth.
SMTSortRef getFloatSort(unsigned BitWidth) {
@@ -459,7 +460,7 @@ public:
using SMTSolverRef = std::shared_ptr<SMTSolver>;
/// Convenience method to create and Z3Solver object
-SMTSolverRef CreateZ3Solver();
+LLVM_ABI SMTSolverRef CreateZ3Solver();
} // namespace llvm
diff --git a/llvm/include/llvm/Support/ScaledNumber.h b/llvm/include/llvm/Support/ScaledNumber.h
index faf3ce3..87a5680 100644
--- a/llvm/include/llvm/Support/ScaledNumber.h
+++ b/llvm/include/llvm/Support/ScaledNumber.h
@@ -21,6 +21,7 @@
#ifndef LLVM_SUPPORT_SCALEDNUMBER_H
#define LLVM_SUPPORT_SCALEDNUMBER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cstdint>
@@ -105,7 +106,7 @@ inline std::pair<uint64_t, int16_t> getAdjusted64(uint64_t Digits,
/// Multiply two 64-bit integers to create a 64-bit scaled number.
///
/// Implemented with four 64-bit integer multiplies.
-std::pair<uint64_t, int16_t> multiply64(uint64_t LHS, uint64_t RHS);
+LLVM_ABI std::pair<uint64_t, int16_t> multiply64(uint64_t LHS, uint64_t RHS);
/// Multiply two 32-bit integers to create a 32-bit scaled number.
///
@@ -135,14 +136,16 @@ inline std::pair<uint64_t, int16_t> getProduct64(uint64_t LHS, uint64_t RHS) {
/// Implemented with long division.
///
/// \pre \c Dividend and \c Divisor are non-zero.
-std::pair<uint64_t, int16_t> divide64(uint64_t Dividend, uint64_t Divisor);
+LLVM_ABI std::pair<uint64_t, int16_t> divide64(uint64_t Dividend,
+ uint64_t Divisor);
/// Divide two 32-bit integers to create a 32-bit scaled number.
///
/// Implemented with one 64-bit integer divide/remainder pair.
///
/// \pre \c Dividend and \c Divisor are non-zero.
-std::pair<uint32_t, int16_t> divide32(uint32_t Dividend, uint32_t Divisor);
+LLVM_ABI std::pair<uint32_t, int16_t> divide32(uint32_t Dividend,
+ uint32_t Divisor);
/// Divide two 32-bit numbers to create a 32-bit scaled number.
///
@@ -242,7 +245,7 @@ template <class DigitsT> int32_t getLgCeiling(DigitsT Digits, int16_t Scale) {
/// 1, and 0 for less than, greater than, and equal, respectively.
///
/// \pre 0 <= ScaleDiff < 64.
-int compareImpl(uint64_t L, uint64_t R, int ScaleDiff);
+LLVM_ABI int compareImpl(uint64_t L, uint64_t R, int ScaleDiff);
/// Compare two scaled numbers.
///
@@ -421,11 +424,11 @@ class ScaledNumberBase {
public:
static constexpr int DefaultPrecision = 10;
- static void dump(uint64_t D, int16_t E, int Width);
- static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
- unsigned Precision);
- static std::string toString(uint64_t D, int16_t E, int Width,
- unsigned Precision);
+ LLVM_ABI static void dump(uint64_t D, int16_t E, int Width);
+ LLVM_ABI static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E,
+ int Width, unsigned Precision);
+ LLVM_ABI static std::string toString(uint64_t D, int16_t E, int Width,
+ unsigned Precision);
static int countLeadingZeros32(uint32_t N) { return llvm::countl_zero(N); }
static int countLeadingZeros64(uint64_t N) { return llvm::countl_zero(N); }
static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
diff --git a/llvm/include/llvm/Support/ScopedPrinter.h b/llvm/include/llvm/Support/ScopedPrinter.h
index 506b40a..753fd3f 100644
--- a/llvm/include/llvm/Support/ScopedPrinter.h
+++ b/llvm/include/llvm/Support/ScopedPrinter.h
@@ -14,6 +14,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/JSON.h"
@@ -80,7 +81,7 @@ struct FlagEntry {
uint64_t Value;
};
-raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value);
template <class T> std::string to_string(const T &Value) {
std::string number;
@@ -97,7 +98,7 @@ std::string enumToString(T Value, ArrayRef<EnumEntry<TEnum>> EnumValues) {
return utohexstr(Value, true);
}
-class ScopedPrinter {
+class LLVM_ABI ScopedPrinter {
public:
enum class ScopedPrinterKind {
Base,
@@ -572,9 +573,9 @@ private:
std::unique_ptr<DelimitedScope> OuterScope;
public:
- JSONScopedPrinter(raw_ostream &OS, bool PrettyPrint = false,
- std::unique_ptr<DelimitedScope> &&OuterScope =
- std::unique_ptr<DelimitedScope>{});
+ LLVM_ABI JSONScopedPrinter(raw_ostream &OS, bool PrettyPrint = false,
+ std::unique_ptr<DelimitedScope> &&OuterScope =
+ std::unique_ptr<DelimitedScope>{});
static bool classof(const ScopedPrinter *SP) {
return SP->getKind() == ScopedPrinter::ScopedPrinterKind::JSON;
diff --git a/llvm/include/llvm/Support/Signals.h b/llvm/include/llvm/Support/Signals.h
index 0a560e6..6ce26ac 100644
--- a/llvm/include/llvm/Support/Signals.h
+++ b/llvm/include/llvm/Support/Signals.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SUPPORT_SIGNALS_H
#define LLVM_SUPPORT_SIGNALS_H
+#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <string>
@@ -25,16 +26,17 @@ namespace sys {
/// This function runs all the registered interrupt handlers, including the
/// removal of files registered by RemoveFileOnSignal.
-void RunInterruptHandlers();
+LLVM_ABI void RunInterruptHandlers();
/// This function registers signal handlers to ensure that if a signal gets
/// delivered that the named file is removed.
/// Remove a file if a fatal signal occurs.
-bool RemoveFileOnSignal(StringRef Filename, std::string *ErrMsg = nullptr);
+LLVM_ABI bool RemoveFileOnSignal(StringRef Filename,
+ std::string *ErrMsg = nullptr);
/// This function removes a file from the list of files to be removed on
/// signal delivery.
-void DontRemoveFileOnSignal(StringRef Filename);
+LLVM_ABI void DontRemoveFileOnSignal(StringRef Filename);
/// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
/// process, print a stack trace and then exit.
@@ -44,26 +46,26 @@ void DontRemoveFileOnSignal(StringRef Filename);
/// StringRef(), in which case we will only search $PATH.
/// \param DisableCrashReporting if \c true, disable the normal crash
/// reporting mechanisms on the underlying operating system.
-void PrintStackTraceOnErrorSignal(StringRef Argv0,
- bool DisableCrashReporting = false);
+LLVM_ABI void PrintStackTraceOnErrorSignal(StringRef Argv0,
+ bool DisableCrashReporting = false);
/// Disable all system dialog boxes that appear when the process crashes.
-void DisableSystemDialogsOnCrash();
+LLVM_ABI void DisableSystemDialogsOnCrash();
/// Print the stack trace using the given \c raw_ostream object.
/// \param Depth refers to the number of stackframes to print. If not
/// specified, the entire frame is printed.
-void PrintStackTrace(raw_ostream &OS, int Depth = 0);
+LLVM_ABI void PrintStackTrace(raw_ostream &OS, int Depth = 0);
// Run all registered signal handlers.
-void RunSignalHandlers();
+LLVM_ABI void RunSignalHandlers();
using SignalHandlerCallback = void (*)(void *);
/// Add a function to be called when an abort/kill signal is delivered to the
/// process. The handler can have a cookie passed to it to identify what
/// instance of the handler it is.
-void AddSignalHandler(SignalHandlerCallback FnPtr, void *Cookie);
+LLVM_ABI void AddSignalHandler(SignalHandlerCallback FnPtr, void *Cookie);
/// This function registers a function to be called when the user "interrupts"
/// the program (typically by pressing ctrl-c). When the user interrupts the
@@ -74,7 +76,7 @@ void AddSignalHandler(SignalHandlerCallback FnPtr, void *Cookie);
/// functions. An null interrupt function pointer disables the current
/// installed function. Note also that the handler may be executed on a
/// different thread on some platforms.
-void SetInterruptFunction(void (*IF)());
+LLVM_ABI void SetInterruptFunction(void (*IF)());
/// Registers a function to be called when an "info" signal is delivered to
/// the process.
@@ -86,7 +88,7 @@ void SetInterruptFunction(void (*IF)());
/// functions. An null function pointer disables the current installed
/// function. Note also that the handler may be executed on a different
/// thread on some platforms.
-void SetInfoSignalFunction(void (*Handler)());
+LLVM_ABI void SetInfoSignalFunction(void (*Handler)());
/// Registers a function to be called in a "one-shot" manner when a pipe
/// signal is delivered to the process (i.e., on a failed write to a pipe).
@@ -102,15 +104,15 @@ void SetInfoSignalFunction(void (*Handler)());
/// functions. A null handler pointer disables the current installed
/// function. Note also that the handler may be executed on a
/// different thread on some platforms.
-void SetOneShotPipeSignalFunction(void (*Handler)());
+LLVM_ABI void SetOneShotPipeSignalFunction(void (*Handler)());
/// On Unix systems and Windows, this function exits with an "IO error" exit
/// code.
-void DefaultOneShotPipeSignalHandler();
+LLVM_ABI void DefaultOneShotPipeSignalHandler();
#ifdef _WIN32
/// Windows does not support signals and this handler must be called manually.
-void CallOneShotPipeSignalHandler();
+LLVM_ABI void CallOneShotPipeSignalHandler();
#endif
/// This function does the following:
@@ -120,9 +122,9 @@ void CallOneShotPipeSignalHandler();
/// - create a core/mini dump of the exception context whenever possible
/// Context is a system-specific failure context: it is the signal type on
/// Unix; the ExceptionContext on Windows.
-void CleanupOnSignal(uintptr_t Context);
+LLVM_ABI void CleanupOnSignal(uintptr_t Context);
-void unregisterHandlers();
+LLVM_ABI void unregisterHandlers();
} // namespace sys
} // namespace llvm
diff --git a/llvm/include/llvm/Support/Signposts.h b/llvm/include/llvm/Support/Signposts.h
index 37089bd..83667ba 100644
--- a/llvm/include/llvm/Support/Signposts.h
+++ b/llvm/include/llvm/Support/Signposts.h
@@ -16,6 +16,7 @@
#ifndef LLVM_SUPPORT_SIGNPOSTS_H
#define LLVM_SUPPORT_SIGNPOSTS_H
+#include "llvm/Support/Compiler.h"
#include <memory>
namespace llvm {
@@ -28,15 +29,15 @@ class SignpostEmitter {
std::unique_ptr<SignpostEmitterImpl> Impl;
public:
- SignpostEmitter();
- ~SignpostEmitter();
+ LLVM_ABI SignpostEmitter();
+ LLVM_ABI ~SignpostEmitter();
- bool isEnabled() const;
+ LLVM_ABI bool isEnabled() const;
/// Begin a signposted interval for a given object.
- void startInterval(const void *O, StringRef Name);
+ LLVM_ABI void startInterval(const void *O, StringRef Name);
/// End a signposted interval for a given object.
- void endInterval(const void *O, StringRef Name);
+ LLVM_ABI void endInterval(const void *O, StringRef Name);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/SipHash.h b/llvm/include/llvm/Support/SipHash.h
index ab05ff3..910cf594 100644
--- a/llvm/include/llvm/Support/SipHash.h
+++ b/llvm/include/llvm/Support/SipHash.h
@@ -17,6 +17,7 @@
#ifndef LLVM_SUPPORT_SIPHASH_H
#define LLVM_SUPPORT_SIPHASH_H
+#include "llvm/Support/Compiler.h"
#include <cstdint>
namespace llvm {
@@ -25,12 +26,12 @@ template <typename T> class ArrayRef;
class StringRef;
/// Computes a SipHash-2-4 64-bit result.
-void getSipHash_2_4_64(ArrayRef<uint8_t> In, const uint8_t (&K)[16],
- uint8_t (&Out)[8]);
+LLVM_ABI void getSipHash_2_4_64(ArrayRef<uint8_t> In, const uint8_t (&K)[16],
+ uint8_t (&Out)[8]);
/// Computes a SipHash-2-4 128-bit result.
-void getSipHash_2_4_128(ArrayRef<uint8_t> In, const uint8_t (&K)[16],
- uint8_t (&Out)[16]);
+LLVM_ABI void getSipHash_2_4_128(ArrayRef<uint8_t> In, const uint8_t (&K)[16],
+ uint8_t (&Out)[16]);
/// Compute a stable non-zero 16-bit hash of the given string.
///
@@ -45,7 +46,7 @@ void getSipHash_2_4_128(ArrayRef<uint8_t> In, const uint8_t (&K)[16],
/// 16 bits is also sufficiently compact to not inflate a loader relocation.
/// We disallow zero to guarantee a different discriminator from the places
/// in the ABI that use a constant zero.
-uint16_t getPointerAuthStableSipHash(StringRef S);
+LLVM_ABI uint16_t getPointerAuthStableSipHash(StringRef S);
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h b/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h
index f7f2d4e..da096d1 100644
--- a/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h
+++ b/llvm/include/llvm/Support/SmallVectorMemoryBuffer.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_SMALLVECTORMEMORYBUFFER_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
@@ -26,7 +27,7 @@ namespace llvm {
/// instances. This is useful for MCJIT and Orc, where object files are streamed
/// into SmallVectors, then inspected using ObjectFile (which takes a
/// MemoryBuffer).
-class SmallVectorMemoryBuffer : public MemoryBuffer {
+class LLVM_ABI SmallVectorMemoryBuffer : public MemoryBuffer {
public:
/// Construct a SmallVectorMemoryBuffer from the given SmallVector r-value.
SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV,
diff --git a/llvm/include/llvm/Support/SourceMgr.h b/llvm/include/llvm/Support/SourceMgr.h
index 7a4b6de..5637b64 100644
--- a/llvm/include/llvm/Support/SourceMgr.h
+++ b/llvm/include/llvm/Support/SourceMgr.h
@@ -16,6 +16,7 @@
#define LLVM_SUPPORT_SOURCEMGR_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include <vector>
@@ -61,13 +62,13 @@ private:
/// Look up a given \p Ptr in the buffer, determining which line it came
/// from.
- unsigned getLineNumber(const char *Ptr) const;
+ LLVM_ABI unsigned getLineNumber(const char *Ptr) const;
template <typename T>
unsigned getLineNumberSpecialized(const char *Ptr) const;
/// Return a pointer to the first character of the specified line number or
/// null if the line number is invalid.
- const char *getPointerForLineNumber(unsigned LineNo) const;
+ LLVM_ABI const char *getPointerForLineNumber(unsigned LineNo) const;
template <typename T>
const char *getPointerForLineNumberSpecialized(unsigned LineNo) const;
@@ -75,10 +76,10 @@ private:
SMLoc IncludeLoc;
SrcBuffer() = default;
- SrcBuffer(SrcBuffer &&);
+ LLVM_ABI SrcBuffer(SrcBuffer &&);
SrcBuffer(const SrcBuffer &) = delete;
SrcBuffer &operator=(const SrcBuffer &) = delete;
- ~SrcBuffer();
+ LLVM_ABI ~SrcBuffer();
};
/// This is all of the buffers that we are reading from.
@@ -172,8 +173,8 @@ public:
/// If no file is found, this returns 0, otherwise it returns the buffer ID
/// of the stacked file. The full path to the included file can be found in
/// \p IncludedFile.
- unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc,
- std::string &IncludedFile);
+ LLVM_ABI unsigned AddIncludeFile(const std::string &Filename,
+ SMLoc IncludeLoc, std::string &IncludedFile);
/// Search for a file with the specified name in the current directory or in
/// one of the IncludeDirs, and try to open it **without** adding to the
@@ -183,13 +184,13 @@ public:
/// If no file is found, this returns an Error, otherwise it returns the
/// buffer of the stacked file. The full path to the included file can be
/// found in \p IncludedFile.
- ErrorOr<std::unique_ptr<MemoryBuffer>>
+ LLVM_ABI ErrorOr<std::unique_ptr<MemoryBuffer>>
OpenIncludeFile(const std::string &Filename, std::string &IncludedFile);
/// Return the ID of the buffer containing the specified location.
///
/// 0 is returned if the buffer is not found.
- unsigned FindBufferContainingLoc(SMLoc Loc) const;
+ LLVM_ABI unsigned FindBufferContainingLoc(SMLoc Loc) const;
/// Find the line number for the specified location in the specified file.
/// This is not a fast method.
@@ -199,49 +200,49 @@ public:
/// Find the line and column number for the specified location in the
/// specified file. This is not a fast method.
- std::pair<unsigned, unsigned> getLineAndColumn(SMLoc Loc,
- unsigned BufferID = 0) const;
+ LLVM_ABI std::pair<unsigned, unsigned>
+ getLineAndColumn(SMLoc Loc, unsigned BufferID = 0) const;
/// Get a string with the \p SMLoc filename and line number
/// formatted in the standard style.
- std::string getFormattedLocationNoOffset(SMLoc Loc,
- bool IncludePath = false) const;
+ LLVM_ABI std::string
+ getFormattedLocationNoOffset(SMLoc Loc, bool IncludePath = false) const;
/// Given a line and column number in a mapped buffer, turn it into an SMLoc.
/// This will return a null SMLoc if the line/column location is invalid.
- SMLoc FindLocForLineAndColumn(unsigned BufferID, unsigned LineNo,
- unsigned ColNo);
+ LLVM_ABI SMLoc FindLocForLineAndColumn(unsigned BufferID, unsigned LineNo,
+ unsigned ColNo);
/// Emit a message about the specified location with the specified string.
///
/// \param ShowColors Display colored messages if output is a terminal and
/// the default error handler is used.
- void PrintMessage(raw_ostream &OS, SMLoc Loc, DiagKind Kind, const Twine &Msg,
- ArrayRef<SMRange> Ranges = {},
- ArrayRef<SMFixIt> FixIts = {},
- bool ShowColors = true) const;
+ LLVM_ABI void PrintMessage(raw_ostream &OS, SMLoc Loc, DiagKind Kind,
+ const Twine &Msg, ArrayRef<SMRange> Ranges = {},
+ ArrayRef<SMFixIt> FixIts = {},
+ bool ShowColors = true) const;
/// Emits a diagnostic to llvm::errs().
- void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
- ArrayRef<SMRange> Ranges = {},
- ArrayRef<SMFixIt> FixIts = {},
- bool ShowColors = true) const;
+ LLVM_ABI void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = {},
+ ArrayRef<SMFixIt> FixIts = {},
+ bool ShowColors = true) const;
/// Emits a manually-constructed diagnostic to the given output stream.
///
/// \param ShowColors Display colored messages if output is a terminal and
/// the default error handler is used.
- void PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
- bool ShowColors = true) const;
+ LLVM_ABI void PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
+ bool ShowColors = true) const;
/// Return an SMDiagnostic at the specified location with the specified
/// string.
///
/// \param Msg If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
- SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
- ArrayRef<SMRange> Ranges = {},
- ArrayRef<SMFixIt> FixIts = {}) const;
+ LLVM_ABI SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = {},
+ ArrayRef<SMFixIt> FixIts = {}) const;
/// Prints the names of included files and the line of the file they were
/// included from. A diagnostic handler can use this before printing its
@@ -249,7 +250,7 @@ public:
///
/// \param IncludeLoc The location of the include.
/// \param OS the raw_ostream to print on.
- void PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const;
+ LLVM_ABI void PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const;
};
/// Represents a single fixit, a replacement of one range of text with another.
@@ -259,7 +260,7 @@ class SMFixIt {
std::string Text;
public:
- SMFixIt(SMRange R, const Twine &Replacement);
+ LLVM_ABI SMFixIt(SMRange R, const Twine &Replacement);
SMFixIt(SMLoc Loc, const Twine &Replacement)
: SMFixIt(SMRange(Loc, Loc), Replacement) {}
@@ -297,10 +298,11 @@ public:
: Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd), Message(Msg) {}
// Diagnostic with a location.
- SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN, int Line, int Col,
- SourceMgr::DiagKind Kind, StringRef Msg, StringRef LineStr,
- ArrayRef<std::pair<unsigned, unsigned>> Ranges,
- ArrayRef<SMFixIt> FixIts = {});
+ LLVM_ABI SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN, int Line,
+ int Col, SourceMgr::DiagKind Kind, StringRef Msg,
+ StringRef LineStr,
+ ArrayRef<std::pair<unsigned, unsigned>> Ranges,
+ ArrayRef<SMFixIt> FixIts = {});
const SourceMgr *getSourceMgr() const { return SM; }
SMLoc getLoc() const { return Loc; }
@@ -316,8 +318,9 @@ public:
ArrayRef<SMFixIt> getFixIts() const { return FixIts; }
- void print(const char *ProgName, raw_ostream &S, bool ShowColors = true,
- bool ShowKindLabel = true, bool ShowLocation = true) const;
+ LLVM_ABI void print(const char *ProgName, raw_ostream &S,
+ bool ShowColors = true, bool ShowKindLabel = true,
+ bool ShowLocation = true) const;
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/SpecialCaseList.h b/llvm/include/llvm/Support/SpecialCaseList.h
index 30e3fc6..ca2030a 100644
--- a/llvm/include/llvm/Support/SpecialCaseList.h
+++ b/llvm/include/llvm/Support/SpecialCaseList.h
@@ -13,6 +13,7 @@
#define LLVM_SUPPORT_SPECIALCASELIST_H
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/GlobPattern.h"
#include "llvm/Support/Regex.h"
#include <memory>
@@ -70,27 +71,27 @@ class SpecialCaseList {
public:
/// Parses the special case list entries from files. On failure, returns
/// 0 and writes an error message to string.
- static std::unique_ptr<SpecialCaseList>
+ LLVM_ABI static std::unique_ptr<SpecialCaseList>
create(const std::vector<std::string> &Paths, llvm::vfs::FileSystem &FS,
std::string &Error);
/// Parses the special case list from a memory buffer. On failure, returns
/// 0 and writes an error message to string.
- static std::unique_ptr<SpecialCaseList> create(const MemoryBuffer *MB,
- std::string &Error);
+ LLVM_ABI static std::unique_ptr<SpecialCaseList>
+ create(const MemoryBuffer *MB, std::string &Error);
/// Parses the special case list entries from files. On failure, reports a
/// fatal error.
- static std::unique_ptr<SpecialCaseList>
+ LLVM_ABI static std::unique_ptr<SpecialCaseList>
createOrDie(const std::vector<std::string> &Paths, llvm::vfs::FileSystem &FS);
- ~SpecialCaseList();
+ LLVM_ABI ~SpecialCaseList();
/// Returns true, if special case list contains a line
/// \code
/// @Prefix:<E>=@Category
/// \endcode
/// where @Query satisfies the glob <E> in a given @Section.
- bool inSection(StringRef Section, StringRef Prefix, StringRef Query,
- StringRef Category = StringRef()) const;
+ LLVM_ABI bool inSection(StringRef Section, StringRef Prefix, StringRef Query,
+ StringRef Category = StringRef()) const;
/// Returns the line number corresponding to the special case list entry if
/// the special case list contains a line
@@ -100,15 +101,16 @@ public:
/// where @Query satisfies the glob <E> in a given @Section.
/// Returns zero if there is no exclusion entry corresponding to this
/// expression.
- unsigned inSectionBlame(StringRef Section, StringRef Prefix, StringRef Query,
- StringRef Category = StringRef()) const;
+ LLVM_ABI unsigned inSectionBlame(StringRef Section, StringRef Prefix,
+ StringRef Query,
+ StringRef Category = StringRef()) const;
protected:
// Implementations of the create*() functions that can also be used by derived
// classes.
- bool createInternal(const std::vector<std::string> &Paths,
- vfs::FileSystem &VFS, std::string &Error);
- bool createInternal(const MemoryBuffer *MB, std::string &Error);
+ LLVM_ABI bool createInternal(const std::vector<std::string> &Paths,
+ vfs::FileSystem &VFS, std::string &Error);
+ LLVM_ABI bool createInternal(const MemoryBuffer *MB, std::string &Error);
SpecialCaseList() = default;
SpecialCaseList(SpecialCaseList const &) = delete;
@@ -117,10 +119,11 @@ protected:
/// Represents a set of globs and their line numbers
class Matcher {
public:
- Error insert(StringRef Pattern, unsigned LineNumber, bool UseRegex);
+ LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber,
+ bool UseRegex);
// Returns the line number in the source file that this query matches to.
// Returns zero if no match is found.
- unsigned match(StringRef Query) const;
+ LLVM_ABI unsigned match(StringRef Query) const;
StringMap<std::pair<GlobPattern, unsigned>> Globs;
std::vector<std::pair<std::unique_ptr<Regex>, unsigned>> RegExes;
@@ -138,16 +141,17 @@ protected:
StringMap<Section> Sections;
- Expected<Section *> addSection(StringRef SectionStr, unsigned LineNo,
- bool UseGlobs = true);
+ LLVM_ABI Expected<Section *> addSection(StringRef SectionStr, unsigned LineNo,
+ bool UseGlobs = true);
/// Parses just-constructed SpecialCaseList entries from a memory buffer.
- bool parse(const MemoryBuffer *MB, std::string &Error);
+ LLVM_ABI bool parse(const MemoryBuffer *MB, std::string &Error);
// Helper method for derived classes to search by Prefix, Query, and Category
// once they have already resolved a section entry.
- unsigned inSectionBlame(const SectionEntries &Entries, StringRef Prefix,
- StringRef Query, StringRef Category) const;
+ LLVM_ABI unsigned inSectionBlame(const SectionEntries &Entries,
+ StringRef Prefix, StringRef Query,
+ StringRef Category) const;
};
} // namespace llvm
diff --git a/llvm/include/llvm/Support/StringSaver.h b/llvm/include/llvm/Support/StringSaver.h
index 95ace0e..ab3d75e 100644
--- a/llvm/include/llvm/Support/StringSaver.h
+++ b/llvm/include/llvm/Support/StringSaver.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -28,8 +29,8 @@ public:
// All returned strings are null-terminated: *save(S).end() == 0.
StringRef save(const char *S) { return save(StringRef(S)); }
- StringRef save(StringRef S);
- StringRef save(const Twine &S);
+ LLVM_ABI StringRef save(StringRef S);
+ LLVM_ABI StringRef save(const Twine &S);
StringRef save(const std::string &S) { return save(StringRef(S)); }
};
@@ -50,8 +51,8 @@ public:
// All returned strings are null-terminated: *save(S).end() == 0.
StringRef save(const char *S) { return save(StringRef(S)); }
- StringRef save(StringRef S);
- StringRef save(const Twine &S);
+ LLVM_ABI StringRef save(StringRef S);
+ LLVM_ABI StringRef save(const Twine &S);
StringRef save(const std::string &S) { return save(StringRef(S)); }
};
diff --git a/llvm/include/llvm/Support/SuffixTree.h b/llvm/include/llvm/Support/SuffixTree.h
index 37b7366..4c78235 100644
--- a/llvm/include/llvm/Support/SuffixTree.h
+++ b/llvm/include/llvm/Support/SuffixTree.h
@@ -34,6 +34,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/SuffixTreeNode.h"
namespace llvm {
@@ -152,8 +153,8 @@ public:
/// \param Str The string to construct the suffix tree for.
/// \param OutlinerLeafDescendants Whether to consider leaf descendants or
/// only leaf children (used by Machine Outliner).
- SuffixTree(const ArrayRef<unsigned> &Str,
- bool OutlinerLeafDescendants = false);
+ LLVM_ABI SuffixTree(const ArrayRef<unsigned> &Str,
+ bool OutlinerLeafDescendants = false);
/// Iterator for finding all repeated substrings in the suffix tree.
struct RepeatedSubstringIterator {
@@ -180,7 +181,7 @@ public:
bool OutlinerLeafDescendants = !LeafNodes.empty();
/// Move the iterator to the next repeated substring.
- void advance();
+ LLVM_ABI void advance();
public:
/// Return the current repeated substring.
diff --git a/llvm/include/llvm/Support/SuffixTreeNode.h b/llvm/include/llvm/Support/SuffixTreeNode.h
index 84b590f..b49febf 100644
--- a/llvm/include/llvm/Support/SuffixTreeNode.h
+++ b/llvm/include/llvm/Support/SuffixTreeNode.h
@@ -26,6 +26,7 @@
#ifndef LLVM_SUPPORT_SUFFIXTREE_NODE_H
#define LLVM_SUPPORT_SUFFIXTREE_NODE_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -62,31 +63,31 @@ public:
NodeKind getKind() const { return Kind; }
/// \return the start index of this node's substring in the entire string.
- unsigned getStartIdx() const;
+ LLVM_ABI unsigned getStartIdx() const;
/// \returns the end index of this node.
virtual unsigned getEndIdx() const = 0;
/// \return the index of this node's left most leaf node.
- unsigned getLeftLeafIdx() const;
+ LLVM_ABI unsigned getLeftLeafIdx() const;
/// \return the index of this node's right most leaf node.
- unsigned getRightLeafIdx() const;
+ LLVM_ABI unsigned getRightLeafIdx() const;
/// Set the index of the left most leaf node of this node to \p Idx.
- void setLeftLeafIdx(unsigned Idx);
+ LLVM_ABI void setLeftLeafIdx(unsigned Idx);
/// Set the index of the right most leaf node of this node to \p Idx.
- void setRightLeafIdx(unsigned Idx);
+ LLVM_ABI void setRightLeafIdx(unsigned Idx);
/// Advance this node's StartIdx by \p Inc.
- void incrementStartIdx(unsigned Inc);
+ LLVM_ABI void incrementStartIdx(unsigned Inc);
/// Set the length of the string from the root to this node to \p Len.
- void setConcatLen(unsigned Len);
+ LLVM_ABI void setConcatLen(unsigned Len);
/// \returns the length of the string from the root to this node.
- unsigned getConcatLen() const;
+ LLVM_ABI unsigned getConcatLen() const;
SuffixTreeNode(NodeKind Kind, unsigned StartIdx)
: Kind(Kind), StartIdx(StartIdx) {}
@@ -94,7 +95,7 @@ public:
};
// A node with two or more children, or the root.
-struct SuffixTreeInternalNode : SuffixTreeNode {
+struct LLVM_ABI SuffixTreeInternalNode : SuffixTreeNode {
private:
/// The end index of this node's substring in the main string.
///
@@ -158,7 +159,7 @@ public:
};
// A node representing a suffix.
-struct SuffixTreeLeafNode : SuffixTreeNode {
+struct LLVM_ABI SuffixTreeLeafNode : SuffixTreeNode {
private:
/// The start index of the suffix represented by this leaf.
unsigned SuffixIdx = EmptyIdx;
diff --git a/llvm/include/llvm/Support/SystemUtils.h b/llvm/include/llvm/Support/SystemUtils.h
index 786bea3..137e7e2 100644
--- a/llvm/include/llvm/Support/SystemUtils.h
+++ b/llvm/include/llvm/Support/SystemUtils.h
@@ -14,6 +14,8 @@
#ifndef LLVM_SUPPORT_SYSTEMUTILS_H
#define LLVM_SUPPORT_SYSTEMUTILS_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class raw_ostream;
@@ -21,7 +23,7 @@ class raw_ostream;
/// generate a warning message to errs() advising against display of bitcode
/// and return true. Otherwise just return false.
/// Check for output written to a console
-bool CheckBitcodeOutputToConsole(
+LLVM_ABI bool CheckBitcodeOutputToConsole(
raw_ostream &stream_to_check ///< The stream to be checked
);
diff --git a/llvm/include/llvm/Support/TarWriter.h b/llvm/include/llvm/Support/TarWriter.h
index 48d810a..b2a2373 100644
--- a/llvm/include/llvm/Support/TarWriter.h
+++ b/llvm/include/llvm/Support/TarWriter.h
@@ -11,16 +11,17 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
class TarWriter {
public:
- static Expected<std::unique_ptr<TarWriter>> create(StringRef OutputPath,
- StringRef BaseDir);
+ LLVM_ABI static Expected<std::unique_ptr<TarWriter>>
+ create(StringRef OutputPath, StringRef BaseDir);
- void append(StringRef Path, StringRef Data);
+ LLVM_ABI void append(StringRef Path, StringRef Data);
private:
TarWriter(int FD, StringRef BaseDir);
diff --git a/llvm/include/llvm/Support/TargetSelect.h b/llvm/include/llvm/Support/TargetSelect.h
index e57614c..912a000 100644
--- a/llvm/include/llvm/Support/TargetSelect.h
+++ b/llvm/include/llvm/Support/TargetSelect.h
@@ -16,34 +16,41 @@
#define LLVM_SUPPORT_TARGETSELECT_H
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
extern "C" {
// Declare all of the target-initialization functions that are available.
-#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo();
+#define LLVM_TARGET(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##TargetInfo();
#include "llvm/Config/Targets.def"
-#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target();
+#define LLVM_TARGET(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##Target();
#include "llvm/Config/Targets.def"
// Declare all of the target-MC-initialization functions that are available.
-#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetMC();
+#define LLVM_TARGET(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##TargetMC();
#include "llvm/Config/Targets.def"
// Declare all of the available assembly printer initialization functions.
-#define LLVM_ASM_PRINTER(TargetName) void LLVMInitialize##TargetName##AsmPrinter();
+#define LLVM_ASM_PRINTER(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
// Declare all of the available assembly parser initialization functions.
-#define LLVM_ASM_PARSER(TargetName) void LLVMInitialize##TargetName##AsmParser();
+#define LLVM_ASM_PARSER(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def"
// Declare all of the available disassembler initialization functions.
-#define LLVM_DISASSEMBLER(TargetName) \
- void LLVMInitialize##TargetName##Disassembler();
+#define LLVM_DISASSEMBLER(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##Disassembler();
#include "llvm/Config/Disassemblers.def"
// Declare all of the available TargetMCA initialization functions.
-#define LLVM_TARGETMCA(TargetName) void LLVMInitialize##TargetName##TargetMCA();
+#define LLVM_TARGETMCA(TargetName) \
+ LLVM_ABI void LLVMInitialize##TargetName##TargetMCA();
#include "llvm/Config/TargetMCAs.def"
}
diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h
index 014b7a0..9272760 100644
--- a/llvm/include/llvm/Support/ThreadPool.h
+++ b/llvm/include/llvm/Support/ThreadPool.h
@@ -15,6 +15,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/RWMutex.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"
@@ -46,7 +47,7 @@ class ThreadPoolTaskGroup;
/// available threads are used up by tasks waiting for a task that has no thread
/// left to run on (this includes waiting on the returned future). It should be
/// generally safe to wait() for a group as long as groups do not form a cycle.
-class ThreadPoolInterface {
+class LLVM_ABI ThreadPoolInterface {
/// The actual method to enqueue a task to be defined by the concrete
/// implementation.
virtual void asyncEnqueue(std::function<void()> Task,
@@ -121,7 +122,7 @@ private:
///
/// The pool keeps a vector of threads alive, waiting on a condition variable
/// for some work to become available.
-class StdThreadPool : public ThreadPoolInterface {
+class LLVM_ABI StdThreadPool : public ThreadPoolInterface {
public:
/// Construct a pool using the hardware strategy \p S for mapping hardware
/// execution resources (threads, cores, CPUs)
@@ -215,7 +216,7 @@ private:
#endif // LLVM_ENABLE_THREADS
/// A non-threaded implementation.
-class SingleThreadExecutor : public ThreadPoolInterface {
+class LLVM_ABI SingleThreadExecutor : public ThreadPoolInterface {
public:
/// Construct a non-threaded pool, ignoring using the hardware strategy.
SingleThreadExecutor(ThreadPoolStrategy ignored = {});
diff --git a/llvm/include/llvm/Support/Threading.h b/llvm/include/llvm/Support/Threading.h
index 01e26ad..d3fe0a5 100644
--- a/llvm/include/llvm/Support/Threading.h
+++ b/llvm/include/llvm/Support/Threading.h
@@ -130,17 +130,18 @@ constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }
/// Retrieves the max available threads for the current strategy. This
/// accounts for affinity masks and takes advantage of all CPU sockets.
- unsigned compute_thread_count() const;
+ LLVM_ABI unsigned compute_thread_count() const;
/// Assign the current thread to an ideal hardware CPU or NUMA node. In a
/// multi-socket system, this ensures threads are assigned to all CPU
/// sockets. \p ThreadPoolNum represents a number bounded by [0,
/// compute_thread_count()).
- void apply_thread_strategy(unsigned ThreadPoolNum) const;
+ LLVM_ABI void apply_thread_strategy(unsigned ThreadPoolNum) const;
/// Finds the CPU socket where a thread should go. Returns 'std::nullopt' if
/// the thread shall remain on the actual CPU socket.
- std::optional<unsigned> compute_cpu_socket(unsigned ThreadPoolNum) const;
+ LLVM_ABI std::optional<unsigned>
+ compute_cpu_socket(unsigned ThreadPoolNum) const;
};
/// Build a strategy from a number of threads as a string provided in \p Num.
@@ -148,7 +149,7 @@ constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }
/// strategy, we attempt to equally allocate the threads on all CPU sockets.
/// "0" or an empty string will return the \p Default strategy.
/// "all" for using all hardware threads.
- std::optional<ThreadPoolStrategy>
+ LLVM_ABI std::optional<ThreadPoolStrategy>
get_threadpool_strategy(StringRef Num, ThreadPoolStrategy Default = {});
/// Returns a thread strategy for tasks requiring significant memory or other
@@ -213,11 +214,11 @@ constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }
/// Note that not all platforms guarantee that the value returned will be
/// unique across the entire system, so portable code should not assume
/// this.
- uint64_t get_threadid();
+ LLVM_ABI uint64_t get_threadid();
/// Get the maximum length of a thread name on this platform.
/// A value of 0 means there is no limit.
- uint32_t get_max_thread_name_length();
+ LLVM_ABI uint32_t get_max_thread_name_length();
/// Set the name of the current thread. Setting a thread's name can
/// be helpful for enabling useful diagnostics under a debugger or when
@@ -225,7 +226,7 @@ constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }
/// wildly across operating systems, and we only make a best effort to
/// perform the operation on supported platforms. No indication of success
/// or failure is returned.
- void set_thread_name(const Twine &Name);
+ LLVM_ABI void set_thread_name(const Twine &Name);
/// Get the name of the current thread. The level of support for
/// getting a thread's name varies wildly across operating systems, and it
@@ -233,20 +234,20 @@ constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }
/// that you can later get it back. This function is intended for diagnostic
/// purposes, and as with setting a thread's name no indication of whether
/// the operation succeeded or failed is returned.
- void get_thread_name(SmallVectorImpl<char> &Name);
+ LLVM_ABI void get_thread_name(SmallVectorImpl<char> &Name);
/// Returns a mask that represents on which hardware thread, core, CPU, NUMA
/// group, the calling thread can be executed. On Windows, threads cannot
/// cross CPU sockets boundaries.
- llvm::BitVector get_thread_affinity_mask();
+ LLVM_ABI llvm::BitVector get_thread_affinity_mask();
/// Returns how many physical CPUs or NUMA groups the system has.
- unsigned get_cpus();
+ LLVM_ABI unsigned get_cpus();
/// Returns how many physical cores (as opposed to logical cores returned from
/// thread::hardware_concurrency(), which includes hyperthreads).
/// Returns -1 if unknown for the current host system.
- int get_physical_cores();
+ LLVM_ABI int get_physical_cores();
enum class ThreadPriority {
/// Lower the current thread's priority as much as possible. Can be used
@@ -264,7 +265,7 @@ constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }
Default = 2,
};
enum class SetThreadPriorityResult { FAILURE, SUCCESS };
- SetThreadPriorityResult set_thread_priority(ThreadPriority Priority);
+ LLVM_ABI SetThreadPriorityResult set_thread_priority(ThreadPriority Priority);
}
#endif
diff --git a/llvm/include/llvm/Support/TimeProfiler.h b/llvm/include/llvm/Support/TimeProfiler.h
index 679e157..3a6fc8c6 100644
--- a/llvm/include/llvm/Support/TimeProfiler.h
+++ b/llvm/include/llvm/Support/TimeProfiler.h
@@ -77,6 +77,7 @@
#define LLVM_SUPPORT_TIMEPROFILER_H
#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -106,24 +107,24 @@ struct TimeTraceMetadata {
};
struct TimeTraceProfiler;
-TimeTraceProfiler *getTimeTraceProfilerInstance();
+LLVM_ABI TimeTraceProfiler *getTimeTraceProfilerInstance();
-bool isTimeTraceVerbose();
+LLVM_ABI bool isTimeTraceVerbose();
struct TimeTraceProfilerEntry;
/// Initialize the time trace profiler.
/// This sets up the global \p TimeTraceProfilerInstance
/// variable to be the profiler instance.
-void timeTraceProfilerInitialize(unsigned TimeTraceGranularity,
- StringRef ProcName,
- bool TimeTraceVerbose = false);
+LLVM_ABI void timeTraceProfilerInitialize(unsigned TimeTraceGranularity,
+ StringRef ProcName,
+ bool TimeTraceVerbose = false);
/// Cleanup the time trace profiler, if it was initialized.
-void timeTraceProfilerCleanup();
+LLVM_ABI void timeTraceProfilerCleanup();
/// Finish a time trace profiler running on a worker thread.
-void timeTraceProfilerFinishThread();
+LLVM_ABI void timeTraceProfilerFinishThread();
/// Is the time trace profiler enabled, i.e. initialized?
inline bool timeTraceProfilerEnabled() {
@@ -133,27 +134,27 @@ inline bool timeTraceProfilerEnabled() {
/// Write profiling data to output stream.
/// Data produced is JSON, in Chrome "Trace Event" format, see
/// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
-void timeTraceProfilerWrite(raw_pwrite_stream &OS);
+LLVM_ABI void timeTraceProfilerWrite(raw_pwrite_stream &OS);
/// Write profiling data to a file.
/// The function will write to \p PreferredFileName if provided, if not
/// then will write to \p FallbackFileName appending .time-trace.
/// Returns a StringError indicating a failure if the function is
/// unable to open the file for writing.
-Error timeTraceProfilerWrite(StringRef PreferredFileName,
- StringRef FallbackFileName);
+LLVM_ABI Error timeTraceProfilerWrite(StringRef PreferredFileName,
+ StringRef FallbackFileName);
/// Manually begin a time section, with the given \p Name and \p Detail.
/// Profiler copies the string data, so the pointers can be given into
/// temporaries. Time sections can be hierarchical; every Begin must have a
/// matching End pair but they can nest.
-TimeTraceProfilerEntry *timeTraceProfilerBegin(StringRef Name,
- StringRef Detail);
-TimeTraceProfilerEntry *
+LLVM_ABI TimeTraceProfilerEntry *timeTraceProfilerBegin(StringRef Name,
+ StringRef Detail);
+LLVM_ABI TimeTraceProfilerEntry *
timeTraceProfilerBegin(StringRef Name,
llvm::function_ref<std::string()> Detail);
-TimeTraceProfilerEntry *
+LLVM_ABI TimeTraceProfilerEntry *
timeTraceProfilerBegin(StringRef Name,
llvm::function_ref<TimeTraceMetadata()> MetaData);
@@ -162,16 +163,17 @@ timeTraceProfilerBegin(StringRef Name,
/// separately from other traces. See
/// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#heading=h.jh64i9l3vwa1
/// for more details.
-TimeTraceProfilerEntry *timeTraceAsyncProfilerBegin(StringRef Name,
- StringRef Detail);
+LLVM_ABI TimeTraceProfilerEntry *timeTraceAsyncProfilerBegin(StringRef Name,
+ StringRef Detail);
// Mark an instant event.
-void timeTraceAddInstantEvent(StringRef Name,
- llvm::function_ref<std::string()> Detail);
+LLVM_ABI void
+timeTraceAddInstantEvent(StringRef Name,
+ llvm::function_ref<std::string()> Detail);
/// Manually end the last time section.
-void timeTraceProfilerEnd();
-void timeTraceProfilerEnd(TimeTraceProfilerEntry *E);
+LLVM_ABI void timeTraceProfilerEnd();
+LLVM_ABI void timeTraceProfilerEnd(TimeTraceProfilerEntry *E);
/// The TimeTraceScope is a helper class to call the begin and end functions
/// of the time trace profiler. When the object is constructed, it begins
diff --git a/llvm/include/llvm/Support/Timer.h b/llvm/include/llvm/Support/Timer.h
index 5a5082b..36890c7 100644
--- a/llvm/include/llvm/Support/Timer.h
+++ b/llvm/include/llvm/Support/Timer.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Mutex.h"
#include <cassert>
@@ -37,7 +38,7 @@ public:
/// usage before the time, otherwise we get time before memory usage. This
/// matters if the time to get the memory usage is significant and shouldn't
/// be counted as part of a duration.
- static TimeRecord getCurrentTime(bool Start = true);
+ LLVM_ABI static TimeRecord getCurrentTime(bool Start = true);
double getProcessTime() const { return UserTime + SystemTime; }
double getUserTime() const { return UserTime; }
@@ -68,7 +69,7 @@ public:
/// Print the current time record to \p OS, with a breakdown showing
/// contributions to the \p Total time record.
- void print(const TimeRecord &Total, raw_ostream &OS) const;
+ LLVM_ABI void print(const TimeRecord &Total, raw_ostream &OS) const;
};
/// This class is used to track the amount of time spent between invocations of
@@ -103,12 +104,13 @@ public:
assert(!TG && !T.TG && "Can only assign uninit timers");
return *this;
}
- ~Timer();
+ LLVM_ABI ~Timer();
/// Create an uninitialized timer, client must use 'init'.
explicit Timer() = default;
- void init(StringRef TimerName, StringRef TimerDescription);
- void init(StringRef TimerName, StringRef TimerDescription, TimerGroup &tg);
+ LLVM_ABI void init(StringRef TimerName, StringRef TimerDescription);
+ LLVM_ABI void init(StringRef TimerName, StringRef TimerDescription,
+ TimerGroup &tg);
const std::string &getName() const { return Name; }
const std::string &getDescription() const { return Description; }
@@ -123,16 +125,16 @@ public:
/// Start the timer running. Time between calls to startTimer/stopTimer is
/// counted by the Timer class. Note that these calls must be correctly
/// paired.
- void startTimer();
+ LLVM_ABI void startTimer();
/// Stop the timer.
- void stopTimer();
+ LLVM_ABI void stopTimer();
/// Clear the timer state.
- void clear();
+ LLVM_ABI void clear();
/// Stop the timer and start another timer.
- void yieldTo(Timer &);
+ LLVM_ABI void yieldTo(Timer &);
/// Return the duration for which this timer has been running.
TimeRecord getTotalTime() const { return Time; }
@@ -166,14 +168,15 @@ public:
/// statement. All timers with the same name are merged. This is primarily
/// used for debugging and for hunting performance problems.
struct NamedRegionTimer : public TimeRegion {
- explicit NamedRegionTimer(StringRef Name, StringRef Description,
- StringRef GroupName,
- StringRef GroupDescription, bool Enabled = true);
+ LLVM_ABI explicit NamedRegionTimer(StringRef Name, StringRef Description,
+ StringRef GroupName,
+ StringRef GroupDescription,
+ bool Enabled = true);
// Create or get a TimerGroup stored in the same global map owned by
// NamedRegionTimer.
- static TimerGroup &getNamedTimerGroup(StringRef GroupName,
- StringRef GroupDescription);
+ LLVM_ABI static TimerGroup &getNamedTimerGroup(StringRef GroupName,
+ StringRef GroupDescription);
};
/// The TimerGroup class is used to group together related timers into a single
@@ -211,12 +214,12 @@ class TimerGroup {
sys::SmartMutex<true> &lock);
public:
- explicit TimerGroup(StringRef Name, StringRef Description);
+ LLVM_ABI explicit TimerGroup(StringRef Name, StringRef Description);
- explicit TimerGroup(StringRef Name, StringRef Description,
- const StringMap<TimeRecord> &Records);
+ LLVM_ABI explicit TimerGroup(StringRef Name, StringRef Description,
+ const StringMap<TimeRecord> &Records);
- ~TimerGroup();
+ LLVM_ABI ~TimerGroup();
void setName(StringRef NewName, StringRef NewDescription) {
Name.assign(NewName.begin(), NewName.end());
@@ -225,32 +228,33 @@ public:
/// Print any started timers in this group, optionally resetting timers after
/// printing them.
- void print(raw_ostream &OS, bool ResetAfterPrint = false);
+ LLVM_ABI void print(raw_ostream &OS, bool ResetAfterPrint = false);
/// Clear all timers in this group.
- void clear();
+ LLVM_ABI void clear();
/// This static method prints all timers.
- static void printAll(raw_ostream &OS);
+ LLVM_ABI static void printAll(raw_ostream &OS);
/// Clear out all timers. This is mostly used to disable automatic
/// printing on shutdown, when timers have already been printed explicitly
/// using \c printAll or \c printJSONValues.
- static void clearAll();
+ LLVM_ABI static void clearAll();
- const char *printJSONValues(raw_ostream &OS, const char *delim);
+ LLVM_ABI const char *printJSONValues(raw_ostream &OS, const char *delim);
/// Prints all timers as JSON key/value pairs.
- static const char *printAllJSONValues(raw_ostream &OS, const char *delim);
+ LLVM_ABI static const char *printAllJSONValues(raw_ostream &OS,
+ const char *delim);
/// Ensure global objects required for statistics printing are initialized.
/// This function is used by the Statistic code to ensure correct order of
/// global constructors and destructors.
- static void constructForStatistics();
+ LLVM_ABI static void constructForStatistics();
/// This makes the timer globals unmanaged, and lets the user manage the
/// lifetime.
- static void *acquireTimerGlobals();
+ LLVM_ABI static void *acquireTimerGlobals();
private:
friend class Timer;
diff --git a/llvm/include/llvm/Support/ToolOutputFile.h b/llvm/include/llvm/Support/ToolOutputFile.h
index c16fb03..8bad82c 100644
--- a/llvm/include/llvm/Support/ToolOutputFile.h
+++ b/llvm/include/llvm/Support/ToolOutputFile.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_TOOLOUTPUTFILE_H
#define LLVM_SUPPORT_TOOLOUTPUTFILE_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
@@ -27,8 +28,8 @@ public:
bool Keep;
StringRef getFilename() { return Filename; }
- explicit CleanupInstaller(StringRef Filename);
- ~CleanupInstaller();
+ LLVM_ABI explicit CleanupInstaller(StringRef Filename);
+ LLVM_ABI ~CleanupInstaller();
};
/// This class contains a raw_fd_ostream and adds a few extra features commonly
@@ -53,10 +54,10 @@ class ToolOutputFile {
public:
/// This constructor's arguments are passed to raw_fd_ostream's
/// constructor.
- ToolOutputFile(StringRef Filename, std::error_code &EC,
- sys::fs::OpenFlags Flags);
+ LLVM_ABI ToolOutputFile(StringRef Filename, std::error_code &EC,
+ sys::fs::OpenFlags Flags);
- ToolOutputFile(StringRef Filename, int FD);
+ LLVM_ABI ToolOutputFile(StringRef Filename, int FD);
/// Return the contained raw_fd_ostream.
raw_fd_ostream &os() { return *OS; }
diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index bae833e..9642529 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_TYPESIZE_H
#define LLVM_SUPPORT_TYPESIZE_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -27,7 +28,7 @@ namespace llvm {
/// Reports a diagnostic message to indicate an invalid size request has been
/// done on a scalable vector. This function may not return.
-void reportInvalidSizeRequest(const char *Msg);
+LLVM_ABI void reportInvalidSizeRequest(const char *Msg);
/// StackOffset holds a fixed and a scalable offset in bytes.
class StackOffset {
@@ -374,7 +375,7 @@ public:
// else
// bail out early for scalable vectors and use getFixedValue()
// }
- operator ScalarTy() const;
+ LLVM_ABI operator ScalarTy() const;
// Additional operators needed to avoid ambiguous parses
// because of the implicit conversion hack.
diff --git a/llvm/include/llvm/Support/Unicode.h b/llvm/include/llvm/Support/Unicode.h
index 8615487..7c790da 100644
--- a/llvm/include/llvm/Support/Unicode.h
+++ b/llvm/include/llvm/Support/Unicode.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_UNICODE_H
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Compiler.h"
#include <optional>
#include <string>
@@ -36,10 +37,10 @@ enum ColumnWidthErrors {
///
/// Printable codepoints are those in the categories L, M, N, P, S and Zs
/// \return true if the character is considered printable.
-bool isPrintable(int UCS);
+LLVM_ABI bool isPrintable(int UCS);
// Formatting codepoints are codepoints in the Cf category.
-bool isFormatting(int UCS);
+LLVM_ABI bool isFormatting(int UCS);
/// Gets the number of positions the UTF8-encoded \p Text is likely to occupy
/// when output on a terminal ("character width"). This depends on the
@@ -55,11 +56,11 @@ bool isFormatting(int UCS);
/// * 0 for each non-spacing and enclosing combining mark;
/// * 2 for each CJK character excluding halfwidth forms;
/// * 1 for each of the remaining characters.
-int columnWidthUTF8(StringRef Text);
+LLVM_ABI int columnWidthUTF8(StringRef Text);
/// Fold input unicode character according the Simple unicode case folding
/// rules.
-int foldCharSimple(int C);
+LLVM_ABI int foldCharSimple(int C);
/// Maps the name or the alias of a Unicode character to its associated
/// codepoints.
@@ -67,14 +68,15 @@ int foldCharSimple(int C);
/// For compatibility with the semantics of named character escape sequences in
/// C++, this mapping does an exact match sensitive to casing and spacing.
/// \return The codepoint of the corresponding character, if any.
-std::optional<char32_t> nameToCodepointStrict(StringRef Name);
+LLVM_ABI std::optional<char32_t> nameToCodepointStrict(StringRef Name);
struct LooseMatchingResult {
char32_t CodePoint;
SmallString<64> Name;
};
-std::optional<LooseMatchingResult> nameToCodepointLooseMatching(StringRef Name);
+LLVM_ABI std::optional<LooseMatchingResult>
+nameToCodepointLooseMatching(StringRef Name);
struct MatchForCodepointName {
std::string Name;
@@ -82,7 +84,7 @@ struct MatchForCodepointName {
char32_t Value = 0;
};
-SmallVector<MatchForCodepointName>
+LLVM_ABI SmallVector<MatchForCodepointName>
nearestMatchesForCodepointName(StringRef Pattern, std::size_t MaxMatchesCount);
} // namespace unicode
diff --git a/llvm/include/llvm/Support/Valgrind.h b/llvm/include/llvm/Support/Valgrind.h
index 1e14dfe..894ddec 100644
--- a/llvm/include/llvm/Support/Valgrind.h
+++ b/llvm/include/llvm/Support/Valgrind.h
@@ -15,16 +15,17 @@
#ifndef LLVM_SUPPORT_VALGRIND_H
#define LLVM_SUPPORT_VALGRIND_H
+#include "llvm/Support/Compiler.h"
#include <cstddef>
namespace llvm {
namespace sys {
// True if Valgrind is controlling this process.
- bool RunningOnValgrind();
+LLVM_ABI bool RunningOnValgrind();
- // Discard valgrind's translation of code in the range [Addr .. Addr + Len).
- // Otherwise valgrind may continue to execute the old version of the code.
- void ValgrindDiscardTranslations(const void *Addr, size_t Len);
+// Discard valgrind's translation of code in the range [Addr .. Addr + Len).
+// Otherwise valgrind may continue to execute the old version of the code.
+LLVM_ABI void ValgrindDiscardTranslations(const void *Addr, size_t Len);
} // namespace sys
} // end namespace llvm
diff --git a/llvm/include/llvm/Support/VersionTuple.h b/llvm/include/llvm/Support/VersionTuple.h
index aeb4798..867f81d 100644
--- a/llvm/include/llvm/Support/VersionTuple.h
+++ b/llvm/include/llvm/Support/VersionTuple.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/Support/Compiler.h"
#include <optional>
#include <string>
#include <tuple>
@@ -100,7 +101,7 @@ public:
/// Return a version tuple that contains a different major version but
/// everything else is the same.
- VersionTuple withMajorReplaced(unsigned NewMajor) const;
+ LLVM_ABI VersionTuple withMajorReplaced(unsigned NewMajor) const;
/// Return a version tuple that contains only components that are non-zero.
VersionTuple normalize() const {
@@ -177,16 +178,16 @@ public:
}
/// Retrieve a string representation of the version number.
- std::string getAsString() const;
+ LLVM_ABI std::string getAsString() const;
/// Try to parse the given string as a version number.
/// \returns \c true if the string does not match the regular expression
/// [0-9]+(\.[0-9]+){0,3}
- bool tryParse(StringRef string);
+ LLVM_ABI bool tryParse(StringRef string);
};
/// Print a version number.
-raw_ostream &operator<<(raw_ostream &Out, const VersionTuple &V);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &Out, const VersionTuple &V);
// Provide DenseMapInfo for version tuples.
template <> struct DenseMapInfo<VersionTuple> {
diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h
index 1358e88..734b795 100644
--- a/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Chrono.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
@@ -68,18 +69,19 @@ public:
bool ExposesExternalVFSPath = false;
Status() = default;
- Status(const llvm::sys::fs::file_status &Status);
- Status(const Twine &Name, llvm::sys::fs::UniqueID UID,
- llvm::sys::TimePoint<> MTime, uint32_t User, uint32_t Group,
- uint64_t Size, llvm::sys::fs::file_type Type,
- llvm::sys::fs::perms Perms);
+ LLVM_ABI Status(const llvm::sys::fs::file_status &Status);
+ LLVM_ABI Status(const Twine &Name, llvm::sys::fs::UniqueID UID,
+ llvm::sys::TimePoint<> MTime, uint32_t User, uint32_t Group,
+ uint64_t Size, llvm::sys::fs::file_type Type,
+ llvm::sys::fs::perms Perms);
/// Get a copy of a Status with a different size.
- static Status copyWithNewSize(const Status &In, uint64_t NewSize);
+ LLVM_ABI static Status copyWithNewSize(const Status &In, uint64_t NewSize);
/// Get a copy of a Status with a different name.
- static Status copyWithNewName(const Status &In, const Twine &NewName);
- static Status copyWithNewName(const llvm::sys::fs::file_status &In,
- const Twine &NewName);
+ LLVM_ABI static Status copyWithNewName(const Status &In,
+ const Twine &NewName);
+ LLVM_ABI static Status copyWithNewName(const llvm::sys::fs::file_status &In,
+ const Twine &NewName);
/// Returns the name that should be used for this file or directory.
StringRef getName() const { return Name; }
@@ -97,18 +99,18 @@ public:
/// @name Status queries
/// These are static queries in llvm::sys::fs.
/// @{
- bool equivalent(const Status &Other) const;
- bool isDirectory() const;
- bool isRegularFile() const;
- bool isOther() const;
- bool isSymlink() const;
- bool isStatusKnown() const;
- bool exists() const;
+ LLVM_ABI bool equivalent(const Status &Other) const;
+ LLVM_ABI bool isDirectory() const;
+ LLVM_ABI bool isRegularFile() const;
+ LLVM_ABI bool isOther() const;
+ LLVM_ABI bool isSymlink() const;
+ LLVM_ABI bool isStatusKnown() const;
+ LLVM_ABI bool exists() const;
/// @}
};
/// Represents an open file.
-class File {
+class LLVM_ABI File {
public:
/// Destroy the file after closing it (if open).
/// Sub-classes should generally call close() inside their destructors. We
@@ -162,7 +164,7 @@ namespace detail {
/// An interface for virtual file systems to provide an iterator over the
/// (non-recursive) contents of a directory.
-struct DirIterImpl {
+struct LLVM_ABI DirIterImpl {
virtual ~DirIterImpl();
/// Sets \c CurrentEntry to the next entry in the directory on success,
@@ -232,14 +234,14 @@ class recursive_directory_iterator {
State; // Input iterator semantics on copy.
public:
- recursive_directory_iterator(FileSystem &FS, const Twine &Path,
- std::error_code &EC);
+ LLVM_ABI recursive_directory_iterator(FileSystem &FS, const Twine &Path,
+ std::error_code &EC);
/// Construct an 'end' iterator.
recursive_directory_iterator() = default;
/// Equivalent to operator++, with an error code.
- recursive_directory_iterator &increment(std::error_code &EC);
+ LLVM_ABI recursive_directory_iterator &increment(std::error_code &EC);
const directory_entry &operator*() const { return *State->Stack.back(); }
const directory_entry *operator->() const { return &*State->Stack.back(); }
@@ -262,8 +264,8 @@ public:
};
/// The virtual file system interface.
-class FileSystem : public llvm::ThreadSafeRefCountedBase<FileSystem>,
- public RTTIExtends<FileSystem, RTTIRoot> {
+class LLVM_ABI FileSystem : public llvm::ThreadSafeRefCountedBase<FileSystem>,
+ public RTTIExtends<FileSystem, RTTIRoot> {
public:
static const char ID;
virtual ~FileSystem();
@@ -370,13 +372,13 @@ protected:
/// the operating system.
/// The working directory is linked to the process's working directory.
/// (This is usually thread-hostile).
-IntrusiveRefCntPtr<FileSystem> getRealFileSystem();
+LLVM_ABI IntrusiveRefCntPtr<FileSystem> getRealFileSystem();
/// Create an \p vfs::FileSystem for the 'real' file system, as seen by
/// the operating system.
/// It has its own working directory, independent of (but initially equal to)
/// that of the process.
-std::unique_ptr<FileSystem> createPhysicalFileSystem();
+LLVM_ABI std::unique_ptr<FileSystem> createPhysicalFileSystem();
/// A file system that allows overlaying one \p AbstractFileSystem on top
/// of another.
@@ -388,7 +390,8 @@ std::unique_ptr<FileSystem> createPhysicalFileSystem();
/// top-most (most recently added) directory are used. When there is a file
/// that exists in more than one file system, the file in the top-most file
/// system overrides the other(s).
-class OverlayFileSystem : public RTTIExtends<OverlayFileSystem, FileSystem> {
+class LLVM_ABI OverlayFileSystem
+ : public RTTIExtends<OverlayFileSystem, FileSystem> {
using FileSystemList = SmallVector<IntrusiveRefCntPtr<FileSystem>, 1>;
/// The stack of file systems, implemented as a list in order of
@@ -448,7 +451,8 @@ protected:
/// By default, this delegates all calls to the underlying file system. This
/// is useful when derived file systems want to override some calls and still
/// proxy other calls.
-class ProxyFileSystem : public RTTIExtends<ProxyFileSystem, FileSystem> {
+class LLVM_ABI ProxyFileSystem
+ : public RTTIExtends<ProxyFileSystem, FileSystem> {
public:
static const char ID;
explicit ProxyFileSystem(IntrusiveRefCntPtr<FileSystem> FS)
@@ -510,7 +514,7 @@ struct NewInMemoryNodeInfo {
llvm::sys::fs::file_type Type;
llvm::sys::fs::perms Perms;
- Status makeStatus() const;
+ LLVM_ABI Status makeStatus() const;
};
class NamedNodeOrError {
@@ -534,7 +538,8 @@ public:
} // namespace detail
/// An in-memory file system.
-class InMemoryFileSystem : public RTTIExtends<InMemoryFileSystem, FileSystem> {
+class LLVM_ABI InMemoryFileSystem
+ : public RTTIExtends<InMemoryFileSystem, FileSystem> {
std::unique_ptr<detail::InMemoryDirectory> Root;
std::string WorkingDirectory;
bool UseNormalizedPaths = true;
@@ -651,11 +656,11 @@ protected:
};
/// Get a globally unique ID for a virtual file or directory.
-llvm::sys::fs::UniqueID getNextVirtualUniqueID();
+LLVM_ABI llvm::sys::fs::UniqueID getNextVirtualUniqueID();
/// Gets a \p FileSystem for a virtual file system described in YAML
/// format.
-std::unique_ptr<FileSystem>
+LLVM_ABI std::unique_ptr<FileSystem>
getVFSFromYAML(std::unique_ptr<llvm::MemoryBuffer> Buffer,
llvm::SourceMgr::DiagHandlerTy DiagHandler,
StringRef YAMLFilePath, void *DiagContext = nullptr,
@@ -774,7 +779,7 @@ class RedirectingFileSystemParser;
/// FIXME: 'use-external-name' causes behaviour that's inconsistent with how
/// "real" filesystems behave. Maybe there should be a separate channel for
/// this information.
-class RedirectingFileSystem
+class LLVM_ABI RedirectingFileSystem
: public RTTIExtends<RedirectingFileSystem, vfs::FileSystem> {
public:
static const char ID;
@@ -922,8 +927,8 @@ public:
std::optional<std::string> ExternalRedirect;
public:
- LookupResult(Entry *E, sys::path::const_iterator Start,
- sys::path::const_iterator End);
+ LLVM_ABI LookupResult(Entry *E, sys::path::const_iterator Start,
+ sys::path::const_iterator End);
/// If the found Entry maps the input path to a path in the external
/// file system (i.e. it is a FileEntry or DirectoryRemapEntry), returns
@@ -938,7 +943,7 @@ public:
/// Get the (canonical) path of the found entry. This uses the as-written
/// path components from the VFS specification.
- void getPath(llvm::SmallVectorImpl<char> &Path) const;
+ LLVM_ABI void getPath(llvm::SmallVectorImpl<char> &Path) const;
};
private:
@@ -1031,6 +1036,10 @@ private:
RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS);
+ // Explicitly non-copyable.
+ RedirectingFileSystem(RedirectingFileSystem const &) = delete;
+ RedirectingFileSystem &operator=(RedirectingFileSystem const &) = delete;
+
/// Looks up the path <tt>[Start, End)</tt> in \p From, possibly recursing
/// into the contents of \p From if it is a directory. Returns a LookupResult
/// giving the matched entry and, if that entry is a FileEntry or
@@ -1107,7 +1116,7 @@ protected:
/// Collect all pairs of <virtual path, real path> entries from the
/// \p YAMLFilePath. This is used by the module dependency collector to forward
/// the entries into the reproducer output VFS YAML file.
-void collectVFSFromYAML(
+LLVM_ABI void collectVFSFromYAML(
std::unique_ptr<llvm::MemoryBuffer> Buffer,
llvm::SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
SmallVectorImpl<YAMLVFSEntry> &CollectedEntries,
@@ -1126,8 +1135,8 @@ class YAMLVFSWriter {
public:
YAMLVFSWriter() = default;
- void addFileMapping(StringRef VirtualPath, StringRef RealPath);
- void addDirectoryMapping(StringRef VirtualPath, StringRef RealPath);
+ LLVM_ABI void addFileMapping(StringRef VirtualPath, StringRef RealPath);
+ LLVM_ABI void addDirectoryMapping(StringRef VirtualPath, StringRef RealPath);
void setCaseSensitivity(bool CaseSensitive) {
IsCaseSensitive = CaseSensitive;
@@ -1142,13 +1151,13 @@ public:
const std::vector<YAMLVFSEntry> &getMappings() const { return Mappings; }
- void write(llvm::raw_ostream &OS);
+ LLVM_ABI void write(llvm::raw_ostream &OS);
};
/// File system that tracks the number of calls to the underlying file system.
/// This is particularly useful when wrapped around \c RealFileSystem to add
/// lightweight tracking of expensive syscalls.
-class TracingFileSystem
+class LLVM_ABI TracingFileSystem
: public llvm::RTTIExtends<TracingFileSystem, ProxyFileSystem> {
public:
static const char ID;
diff --git a/llvm/include/llvm/Support/Windows/WindowsSupport.h b/llvm/include/llvm/Support/Windows/WindowsSupport.h
index 6f5aae2..ffc6fdf 100644
--- a/llvm/include/llvm/Support/Windows/WindowsSupport.h
+++ b/llvm/include/llvm/Support/Windows/WindowsSupport.h
@@ -55,18 +55,18 @@ namespace llvm {
/// reimplements one of the helpers in the Windows 8.1 SDK, which are intended
/// to supercede raw calls to GetVersionEx. Old SDKs, Cygwin, and MinGW don't
/// yet have VersionHelpers.h, so we have our own helper.
-bool RunningWindows8OrGreater();
+LLVM_ABI bool RunningWindows8OrGreater();
/// Determines if the program is running on Windows 11 or Windows Server 2022.
-bool RunningWindows11OrGreater();
+LLVM_ABI bool RunningWindows11OrGreater();
/// Returns the Windows version as Major.Minor.0.BuildNumber. Uses
/// RtlGetVersion or GetVersionEx under the hood depending on what is available.
/// GetVersionEx is deprecated, but this API exposes the build number which can
/// be useful for working around certain kernel bugs.
-llvm::VersionTuple GetWindowsOSVersion();
+LLVM_ABI llvm::VersionTuple GetWindowsOSVersion();
-bool MakeErrMsg(std::string *ErrMsg, const std::string &prefix);
+LLVM_ABI bool MakeErrMsg(std::string *ErrMsg, const std::string &prefix);
// Include GetLastError() in a fatal error message.
[[noreturn]] inline void ReportLastErrorFatal(const char *Msg) {
@@ -235,13 +235,15 @@ namespace windows {
// Returns command line arguments. Unlike arguments given to main(),
// this function guarantees that the returned arguments are encoded in
// UTF-8 regardless of the current code page setting.
-std::error_code GetCommandLineArguments(SmallVectorImpl<const char *> &Args,
- BumpPtrAllocator &Alloc);
+LLVM_ABI std::error_code
+GetCommandLineArguments(SmallVectorImpl<const char *> &Args,
+ BumpPtrAllocator &Alloc);
/// Convert UTF-8 path to a suitable UTF-16 path for use with the Win32 Unicode
/// File API.
-std::error_code widenPath(const Twine &Path8, SmallVectorImpl<wchar_t> &Path16,
- size_t MaxPathLen = MAX_PATH);
+LLVM_ABI std::error_code widenPath(const Twine &Path8,
+ SmallVectorImpl<wchar_t> &Path16,
+ size_t MaxPathLen = MAX_PATH);
} // end namespace windows
} // end namespace sys
diff --git a/llvm/include/llvm/Support/WindowsError.h b/llvm/include/llvm/Support/WindowsError.h
index d11e901..f42c61b 100644
--- a/llvm/include/llvm/Support/WindowsError.h
+++ b/llvm/include/llvm/Support/WindowsError.h
@@ -9,11 +9,12 @@
#ifndef LLVM_SUPPORT_WINDOWSERROR_H
#define LLVM_SUPPORT_WINDOWSERROR_H
+#include "llvm/Support/Compiler.h"
#include <system_error>
namespace llvm {
-std::error_code mapLastWindowsError();
-std::error_code mapWindowsError(unsigned EV);
+LLVM_ABI std::error_code mapLastWindowsError();
+LLVM_ABI std::error_code mapWindowsError(unsigned EV);
}
#endif
diff --git a/llvm/include/llvm/Support/WithColor.h b/llvm/include/llvm/Support/WithColor.h
index e80e5d469..2835e17 100644
--- a/llvm/include/llvm/Support/WithColor.h
+++ b/llvm/include/llvm/Support/WithColor.h
@@ -21,7 +21,7 @@ namespace cl {
class OptionCategory;
}
-extern cl::OptionCategory &getColorCategory();
+LLVM_ABI extern cl::OptionCategory &getColorCategory();
// Symbolic names for various syntax elements.
enum class HighlightColor {
@@ -59,8 +59,8 @@ public:
/// @param OS The output stream
/// @param S Symbolic name for syntax element to color
/// @param Mode Enable, disable or compute whether to use colors.
- LLVM_CTOR_NODISCARD WithColor(raw_ostream &OS, HighlightColor S,
- ColorMode Mode = ColorMode::Auto);
+ LLVM_CTOR_NODISCARD LLVM_ABI WithColor(raw_ostream &OS, HighlightColor S,
+ ColorMode Mode = ColorMode::Auto);
/// To be used like this: WithColor(OS, raw_ostream::BLACK) << "text";
/// @param OS The output stream
/// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
@@ -74,7 +74,7 @@ public:
: OS(OS), Mode(Mode) {
changeColor(Color, Bold, BG);
}
- ~WithColor();
+ LLVM_ABI ~WithColor();
raw_ostream &get() { return OS; }
operator raw_ostream &() { return OS; }
@@ -88,55 +88,55 @@ public:
}
/// Convenience method for printing "error: " to stderr.
- static raw_ostream &error();
+ LLVM_ABI static raw_ostream &error();
/// Convenience method for printing "warning: " to stderr.
- static raw_ostream &warning();
+ LLVM_ABI static raw_ostream &warning();
/// Convenience method for printing "note: " to stderr.
- static raw_ostream &note();
+ LLVM_ABI static raw_ostream &note();
/// Convenience method for printing "remark: " to stderr.
- static raw_ostream &remark();
+ LLVM_ABI static raw_ostream &remark();
/// Convenience method for printing "error: " to the given stream.
- static raw_ostream &error(raw_ostream &OS, StringRef Prefix = "",
- bool DisableColors = false);
+ LLVM_ABI static raw_ostream &error(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
/// Convenience method for printing "warning: " to the given stream.
- static raw_ostream &warning(raw_ostream &OS, StringRef Prefix = "",
- bool DisableColors = false);
+ LLVM_ABI static raw_ostream &warning(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
/// Convenience method for printing "note: " to the given stream.
- static raw_ostream &note(raw_ostream &OS, StringRef Prefix = "",
- bool DisableColors = false);
+ LLVM_ABI static raw_ostream &note(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
/// Convenience method for printing "remark: " to the given stream.
- static raw_ostream &remark(raw_ostream &OS, StringRef Prefix = "",
- bool DisableColors = false);
+ LLVM_ABI static raw_ostream &remark(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
/// Determine whether colors are displayed.
- bool colorsEnabled();
+ LLVM_ABI bool colorsEnabled();
/// Change the color of text that will be output from this point forward.
/// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
/// change only the bold attribute, and keep colors untouched
/// @param Bold Bold/brighter text, default false
/// @param BG If true, change the background, default: change foreground
- WithColor &changeColor(raw_ostream::Colors Color, bool Bold = false,
- bool BG = false);
+ LLVM_ABI WithColor &changeColor(raw_ostream::Colors Color, bool Bold = false,
+ bool BG = false);
/// Reset the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
- WithColor &resetColor();
+ LLVM_ABI WithColor &resetColor();
/// Implement default handling for Error.
/// Print "error: " to stderr.
- static void defaultErrorHandler(Error Err);
+ LLVM_ABI static void defaultErrorHandler(Error Err);
/// Implement default handling for Warning.
/// Print "warning: " to stderr.
- static void defaultWarningHandler(Error Warning);
+ LLVM_ABI static void defaultWarningHandler(Error Warning);
/// Retrieve the default color auto detection function.
- static AutoDetectFunctionType defaultAutoDetectFunction();
+ LLVM_ABI static AutoDetectFunctionType defaultAutoDetectFunction();
/// Change the global auto detection function.
- static void
+ LLVM_ABI static void
setAutoDetectFunction(AutoDetectFunctionType NewAutoDetectFunction);
private:
diff --git a/llvm/include/llvm/Support/YAMLParser.h b/llvm/include/llvm/Support/YAMLParser.h
index 9d95a1e..da645f2 100644
--- a/llvm/include/llvm/Support/YAMLParser.h
+++ b/llvm/include/llvm/Support/YAMLParser.h
@@ -38,6 +38,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include <cassert>
@@ -65,48 +66,48 @@ struct Token;
/// Dump all the tokens in this stream to OS.
/// \returns true if there was an error, false otherwise.
-bool dumpTokens(StringRef Input, raw_ostream &);
+LLVM_ABI bool dumpTokens(StringRef Input, raw_ostream &);
/// Scans all tokens in input without outputting anything. This is used
/// for benchmarking the tokenizer.
/// \returns true if there was an error, false otherwise.
-bool scanTokens(StringRef Input);
+LLVM_ABI bool scanTokens(StringRef Input);
/// Escape \a Input for a double quoted scalar; if \p EscapePrintable
/// is true, all UTF8 sequences will be escaped, if \p EscapePrintable is
/// false, those UTF8 sequences encoding printable unicode scalars will not be
/// escaped, but emitted verbatim.
-std::string escape(StringRef Input, bool EscapePrintable = true);
+LLVM_ABI std::string escape(StringRef Input, bool EscapePrintable = true);
/// Parse \p S as a bool according to https://yaml.org/type/bool.html.
-std::optional<bool> parseBool(StringRef S);
+LLVM_ABI std::optional<bool> parseBool(StringRef S);
/// This class represents a YAML stream potentially containing multiple
/// documents.
class Stream {
public:
/// This keeps a reference to the string referenced by \p Input.
- Stream(StringRef Input, SourceMgr &, bool ShowColors = true,
- std::error_code *EC = nullptr);
+ LLVM_ABI Stream(StringRef Input, SourceMgr &, bool ShowColors = true,
+ std::error_code *EC = nullptr);
- Stream(MemoryBufferRef InputBuffer, SourceMgr &, bool ShowColors = true,
- std::error_code *EC = nullptr);
- ~Stream();
+ LLVM_ABI Stream(MemoryBufferRef InputBuffer, SourceMgr &,
+ bool ShowColors = true, std::error_code *EC = nullptr);
+ LLVM_ABI ~Stream();
- document_iterator begin();
- document_iterator end();
- void skip();
- bool failed();
+ LLVM_ABI document_iterator begin();
+ LLVM_ABI document_iterator end();
+ LLVM_ABI void skip();
+ LLVM_ABI bool failed();
bool validate() {
skip();
return !failed();
}
- void printError(Node *N, const Twine &Msg,
- SourceMgr::DiagKind Kind = SourceMgr::DK_Error);
- void printError(const SMRange &Range, const Twine &Msg,
- SourceMgr::DiagKind Kind = SourceMgr::DK_Error);
+ LLVM_ABI void printError(Node *N, const Twine &Msg,
+ SourceMgr::DiagKind Kind = SourceMgr::DK_Error);
+ LLVM_ABI void printError(const SMRange &Range, const Twine &Msg,
+ SourceMgr::DiagKind Kind = SourceMgr::DK_Error);
private:
friend class Document;
@@ -116,7 +117,7 @@ private:
};
/// Abstract base class for all Nodes.
-class Node {
+class LLVM_ABI Node {
virtual void anchor();
public:
@@ -194,7 +195,7 @@ private:
///
/// Example:
/// !!null null
-class NullNode final : public Node {
+class LLVM_ABI NullNode final : public Node {
void anchor() override;
public:
@@ -209,7 +210,7 @@ public:
///
/// Example:
/// Adena
-class ScalarNode final : public Node {
+class LLVM_ABI ScalarNode final : public Node {
void anchor() override;
public:
@@ -257,7 +258,7 @@ private:
/// |
/// Hello
/// World
-class BlockScalarNode final : public Node {
+class LLVM_ABI BlockScalarNode final : public Node {
void anchor() override;
public:
@@ -287,7 +288,7 @@ private:
///
/// Example:
/// Section: .text
-class KeyValueNode final : public Node {
+class LLVM_ABI KeyValueNode final : public Node {
void anchor() override;
public:
@@ -416,7 +417,7 @@ template <class CollectionType> void skip(CollectionType &C) {
/// Example:
/// Name: _main
/// Scope: Global
-class MappingNode final : public Node {
+class LLVM_ABI MappingNode final : public Node {
void anchor() override;
public:
@@ -464,7 +465,7 @@ private:
/// Example:
/// - Hello
/// - World
-class SequenceNode final : public Node {
+class LLVM_ABI SequenceNode final : public Node {
void anchor() override;
public:
@@ -516,7 +517,7 @@ private:
///
/// Example:
/// *AnchorName
-class AliasNode final : public Node {
+class LLVM_ABI AliasNode final : public Node {
void anchor() override;
public:
@@ -535,14 +536,14 @@ private:
/// node.
class Document {
public:
- Document(Stream &ParentStream);
+ LLVM_ABI Document(Stream &ParentStream);
/// Root for parsing a node. Returns a single node.
- Node *parseBlockNode();
+ LLVM_ABI Node *parseBlockNode();
/// Finish parsing the current document and return true if there are
/// more. Return false otherwise.
- bool skip();
+ LLVM_ABI bool skip();
/// Parse and return the root level node.
Node *getRoot() {
diff --git a/llvm/include/llvm/Support/YAMLTraits.h b/llvm/include/llvm/Support/YAMLTraits.h
index 212b60a..1b1d78d 100644
--- a/llvm/include/llvm/Support/YAMLTraits.h
+++ b/llvm/include/llvm/Support/YAMLTraits.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
@@ -775,7 +776,7 @@ struct unvalidatedMappingTraits
!has_MappingValidateTraits<T, Context>::value> {};
// Base class for Input and Output.
-class IO {
+class LLVM_ABI IO {
public:
IO(void *Ctxt = nullptr);
virtual ~IO();
@@ -1196,92 +1197,92 @@ yamlize(IO &io, T &Seq, bool, Context &Ctx) {
template<>
struct ScalarTraits<bool> {
- static void output(const bool &, void* , raw_ostream &);
- static StringRef input(StringRef, void *, bool &);
+ LLVM_ABI static void output(const bool &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, bool &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<StringRef> {
- static void output(const StringRef &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, StringRef &);
+ LLVM_ABI static void output(const StringRef &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, StringRef &);
static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};
template<>
struct ScalarTraits<std::string> {
- static void output(const std::string &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, std::string &);
+ LLVM_ABI static void output(const std::string &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, std::string &);
static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};
template<>
struct ScalarTraits<uint8_t> {
- static void output(const uint8_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, uint8_t &);
+ LLVM_ABI static void output(const uint8_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, uint8_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<uint16_t> {
- static void output(const uint16_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, uint16_t &);
+ LLVM_ABI static void output(const uint16_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, uint16_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<uint32_t> {
- static void output(const uint32_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, uint32_t &);
+ LLVM_ABI static void output(const uint32_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, uint32_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<uint64_t> {
- static void output(const uint64_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, uint64_t &);
+ LLVM_ABI static void output(const uint64_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, uint64_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<int8_t> {
- static void output(const int8_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, int8_t &);
+ LLVM_ABI static void output(const int8_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, int8_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<int16_t> {
- static void output(const int16_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, int16_t &);
+ LLVM_ABI static void output(const int16_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, int16_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<int32_t> {
- static void output(const int32_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, int32_t &);
+ LLVM_ABI static void output(const int32_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, int32_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<int64_t> {
- static void output(const int64_t &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, int64_t &);
+ LLVM_ABI static void output(const int64_t &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, int64_t &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<float> {
- static void output(const float &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, float &);
+ LLVM_ABI static void output(const float &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, float &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<double> {
- static void output(const double &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, double &);
+ LLVM_ABI static void output(const double &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, double &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
@@ -1424,7 +1425,7 @@ private:
/// the mapRequired() method calls may not be in the same order
/// as the keys in the document.
///
-class Input : public IO {
+class LLVM_ABI Input : public IO {
public:
// Construct a yaml Input object from a StringRef and optional
// user-data. The DiagHandler can be specified to provide
@@ -1582,7 +1583,7 @@ private:
/// The Output class is used to generate a yaml document from in-memory structs
/// and vectors.
///
-class Output : public IO {
+class LLVM_ABI Output : public IO {
public:
Output(raw_ostream &, void *Ctxt = nullptr, int WrapColumn = 70);
~Output() override;
@@ -1742,35 +1743,36 @@ LLVM_YAML_STRONG_TYPEDEF(uint64_t, Hex64)
template<>
struct ScalarTraits<Hex8> {
- static void output(const Hex8 &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, Hex8 &);
+ LLVM_ABI static void output(const Hex8 &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, Hex8 &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<Hex16> {
- static void output(const Hex16 &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, Hex16 &);
+ LLVM_ABI static void output(const Hex16 &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, Hex16 &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<Hex32> {
- static void output(const Hex32 &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, Hex32 &);
+ LLVM_ABI static void output(const Hex32 &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, Hex32 &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template<>
struct ScalarTraits<Hex64> {
- static void output(const Hex64 &, void *, raw_ostream &);
- static StringRef input(StringRef, void *, Hex64 &);
+ LLVM_ABI static void output(const Hex64 &, void *, raw_ostream &);
+ LLVM_ABI static StringRef input(StringRef, void *, Hex64 &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
template <> struct ScalarTraits<VersionTuple> {
- static void output(const VersionTuple &Value, void *, llvm::raw_ostream &Out);
- static StringRef input(StringRef, void *, VersionTuple &);
+ LLVM_ABI static void output(const VersionTuple &Value, void *,
+ llvm::raw_ostream &Out);
+ LLVM_ABI static StringRef input(StringRef, void *, VersionTuple &);
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
diff --git a/llvm/include/llvm/Support/raw_os_ostream.h b/llvm/include/llvm/Support/raw_os_ostream.h
index c51a94d..7209143 100644
--- a/llvm/include/llvm/Support/raw_os_ostream.h
+++ b/llvm/include/llvm/Support/raw_os_ostream.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_RAW_OS_OSTREAM_H
#define LLVM_SUPPORT_RAW_OS_OSTREAM_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <iosfwd>
@@ -21,7 +22,7 @@ namespace llvm {
/// raw_os_ostream - A raw_ostream that writes to an std::ostream. This is a
/// simple adaptor class. It does not check for output errors; clients should
/// use the underlying stream to detect errors.
-class raw_os_ostream : public raw_ostream {
+class LLVM_ABI raw_os_ostream : public raw_ostream {
std::ostream &OS;
/// write_impl - See raw_ostream::write_impl.
diff --git a/llvm/include/llvm/Support/raw_ostream.h b/llvm/include/llvm/Support/raw_ostream.h
index d3b4115..f87344e 100644
--- a/llvm/include/llvm/Support/raw_ostream.h
+++ b/llvm/include/llvm/Support/raw_ostream.h
@@ -15,6 +15,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstddef>
@@ -49,7 +50,7 @@ class FileLocker;
/// output to a stream. It does not support seeking, reopening, rewinding, line
/// buffered disciplines etc. It is a simple buffer that outputs
/// a chunk at a time.
-class raw_ostream {
+class LLVM_ABI raw_ostream {
public:
// Class kinds to support LLVM-style RTTI.
enum class OStreamKind {
@@ -431,7 +432,7 @@ operator<<(OStream &&OS, const T &Value) {
/// An abstract base class for streams implementations that also support a
/// pwrite operation. This is useful for code that can mostly stream out data,
/// but needs to patch in a header that needs to know the output size.
-class raw_pwrite_stream : public raw_ostream {
+class LLVM_ABI raw_pwrite_stream : public raw_ostream {
virtual void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) = 0;
void anchor() override;
@@ -457,7 +458,7 @@ public:
/// A raw_ostream that writes to a file descriptor.
///
-class raw_fd_ostream : public raw_pwrite_stream {
+class LLVM_ABI raw_fd_ostream : public raw_pwrite_stream {
int FD;
bool ShouldClose;
bool SupportsSeeking = false;
@@ -607,17 +608,17 @@ public:
/// This returns a reference to a raw_fd_ostream for standard output. Use it
/// like: outs() << "foo" << "bar";
-raw_fd_ostream &outs();
+LLVM_ABI raw_fd_ostream &outs();
/// This returns a reference to a raw_ostream for standard error.
/// Use it like: errs() << "foo" << "bar";
/// By default, the stream is tied to stdout to ensure stdout is flushed before
/// stderr is written, to ensure the error messages are written in their
/// expected place.
-raw_fd_ostream &errs();
+LLVM_ABI raw_fd_ostream &errs();
/// This returns a reference to a raw_ostream which simply discards output.
-raw_ostream &nulls();
+LLVM_ABI raw_ostream &nulls();
//===----------------------------------------------------------------------===//
// File Streams
@@ -630,9 +631,9 @@ public:
/// Open the specified file for reading/writing/seeking. If an error occurs,
/// information about the error is put into EC, and the stream should be
/// immediately destroyed.
- raw_fd_stream(StringRef Filename, std::error_code &EC);
+ LLVM_ABI raw_fd_stream(StringRef Filename, std::error_code &EC);
- raw_fd_stream(int fd, bool shouldClose);
+ LLVM_ABI raw_fd_stream(int fd, bool shouldClose);
/// This reads the \p Size bytes into a buffer pointed by \p Ptr.
///
@@ -643,10 +644,10 @@ public:
/// On success, the number of bytes read is returned, and the file position is
/// advanced by this number. On error, -1 is returned, use error() to get the
/// error code.
- ssize_t read(char *Ptr, size_t Size);
+ LLVM_ABI ssize_t read(char *Ptr, size_t Size);
/// Check if \p OS is a pointer of type raw_fd_stream*.
- static bool classof(const raw_ostream *OS);
+ LLVM_ABI static bool classof(const raw_ostream *OS);
};
//===----------------------------------------------------------------------===//
@@ -658,7 +659,7 @@ public:
/// raw_string_ostream operates without a buffer, delegating all memory
/// management to the std::string. Thus the std::string is always up-to-date,
/// may be used directly and there is no need to call flush().
-class raw_string_ostream : public raw_ostream {
+class LLVM_ABI raw_string_ostream : public raw_ostream {
std::string &OS;
/// See raw_ostream::write_impl.
@@ -688,7 +689,7 @@ public:
/// raw_svector_ostream operates without a buffer, delegating all memory
/// management to the SmallString. Thus the SmallString is always up-to-date,
/// may be used directly and there is no need to call flush().
-class raw_svector_ostream : public raw_pwrite_stream {
+class LLVM_ABI raw_svector_ostream : public raw_pwrite_stream {
SmallVectorImpl<char> &OS;
/// See raw_ostream::write_impl.
@@ -728,7 +729,7 @@ public:
};
/// A raw_ostream that discards all output.
-class raw_null_ostream : public raw_pwrite_stream {
+class LLVM_ABI raw_null_ostream : public raw_pwrite_stream {
/// See raw_ostream::write_impl.
void write_impl(const char *Ptr, size_t size) override;
void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
@@ -742,7 +743,7 @@ public:
~raw_null_ostream() override;
};
-class buffer_ostream : public raw_svector_ostream {
+class LLVM_ABI buffer_ostream : public raw_svector_ostream {
raw_ostream &OS;
SmallVector<char, 0> Buffer;
@@ -753,7 +754,7 @@ public:
~buffer_ostream() override { OS << str(); }
};
-class buffer_unique_ostream : public raw_svector_ostream {
+class LLVM_ABI buffer_unique_ostream : public raw_svector_ostream {
std::unique_ptr<raw_ostream> OS;
SmallVector<char, 0> Buffer;
@@ -835,10 +836,10 @@ class Error;
/// for other names. For raw_fd_ostream instances, the stream writes to
/// a temporary file. The final output file is atomically replaced with the
/// temporary file after the \p Write function is finished.
-Error writeToOutput(StringRef OutputFileName,
- std::function<Error(raw_ostream &)> Write);
+LLVM_ABI Error writeToOutput(StringRef OutputFileName,
+ std::function<Error(raw_ostream &)> Write);
-raw_ostream &operator<<(raw_ostream &OS, std::nullopt_t);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, std::nullopt_t);
template <typename T, typename = decltype(std::declval<raw_ostream &>()
<< std::declval<const T &>())>
diff --git a/llvm/include/llvm/Support/raw_socket_stream.h b/llvm/include/llvm/Support/raw_socket_stream.h
index 6c65a66..47352e3 100644
--- a/llvm/include/llvm/Support/raw_socket_stream.h
+++ b/llvm/include/llvm/Support/raw_socket_stream.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SUPPORT_RAW_SOCKET_STREAM_H
#define LLVM_SUPPORT_RAW_SOCKET_STREAM_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"
@@ -30,8 +31,8 @@ class raw_socket_stream;
/// Make sure that calls to WSAStartup and WSACleanup are balanced.
class WSABalancer {
public:
- WSABalancer();
- ~WSABalancer();
+ LLVM_ABI WSABalancer();
+ LLVM_ABI ~WSABalancer();
};
#endif // _WIN32
@@ -74,8 +75,8 @@ class ListeningSocket {
#endif // _WIN32
public:
- ~ListeningSocket();
- ListeningSocket(ListeningSocket &&LS);
+ LLVM_ABI ~ListeningSocket();
+ LLVM_ABI ListeningSocket(ListeningSocket &&LS);
ListeningSocket(const ListeningSocket &LS) = delete;
ListeningSocket &operator=(const ListeningSocket &) = delete;
@@ -87,7 +88,7 @@ public:
/// a blocking call to ::poll to return.
///
/// Once shutdown is called there is no way to reinitialize ListeningSocket.
- void shutdown();
+ LLVM_ABI void shutdown();
/// Accepts an incoming connection on the listening socket. This method can
/// optionally either block until a connection is available or timeout after a
@@ -98,7 +99,7 @@ public:
/// \param Timeout An optional timeout duration in milliseconds. Setting
/// Timeout to a negative number causes ::accept to block indefinitely
///
- Expected<std::unique_ptr<raw_socket_stream>> accept(
+ LLVM_ABI Expected<std::unique_ptr<raw_socket_stream>> accept(
const std::chrono::milliseconds &Timeout = std::chrono::milliseconds(-1));
/// Creates a listening socket bound to the specified file system path.
@@ -108,7 +109,7 @@ public:
/// \param SocketPath The file system path where the socket will be created
/// \param MaxBacklog The max number of connections in a socket's backlog
///
- static Expected<ListeningSocket> createUnix(
+ LLVM_ABI static Expected<ListeningSocket> createUnix(
StringRef SocketPath,
int MaxBacklog = llvm::hardware_concurrency().compute_thread_count());
};
@@ -117,7 +118,7 @@ public:
// raw_socket_stream
//===----------------------------------------------------------------------===//
-class raw_socket_stream : public raw_fd_stream {
+class LLVM_ABI raw_socket_stream : public raw_fd_stream {
uint64_t current_pos() const override { return 0; }
#ifdef _WIN32
WSABalancer _;
diff --git a/llvm/include/llvm/Support/thread.h b/llvm/include/llvm/Support/thread.h
index ef2fba8..16e322b 100644
--- a/llvm/include/llvm/Support/thread.h
+++ b/llvm/include/llvm/Support/thread.h
@@ -17,6 +17,7 @@
#define LLVM_SUPPORT_THREAD_H
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
#include <optional>
#include <tuple>
#include <utility>
@@ -69,7 +70,7 @@ public:
}
#endif
- static const std::optional<unsigned> DefaultStackSize;
+ LLVM_ABI static const std::optional<unsigned> DefaultStackSize;
thread() : Thread(native_handle_type()) {}
thread(thread &&Other) noexcept
@@ -115,13 +116,13 @@ private:
native_handle_type Thread;
};
-thread::native_handle_type
+LLVM_ABI thread::native_handle_type
llvm_execute_on_thread_impl(thread::start_routine_type ThreadFunc, void *Arg,
std::optional<unsigned> StackSizeInBytes);
-void llvm_thread_join_impl(thread::native_handle_type Thread);
-void llvm_thread_detach_impl(thread::native_handle_type Thread);
-thread::id llvm_thread_get_id_impl(thread::native_handle_type Thread);
-thread::id llvm_thread_get_current_id_impl();
+LLVM_ABI void llvm_thread_join_impl(thread::native_handle_type Thread);
+LLVM_ABI void llvm_thread_detach_impl(thread::native_handle_type Thread);
+LLVM_ABI thread::id llvm_thread_get_id_impl(thread::native_handle_type Thread);
+LLVM_ABI thread::id llvm_thread_get_current_id_impl();
template <class Function, class... Args>
thread::thread(std::optional<unsigned> StackSizeInBytes, Function &&f,
diff --git a/llvm/include/llvm/Support/xxhash.h b/llvm/include/llvm/Support/xxhash.h
index 5f8a7ab..b521adb 100644
--- a/llvm/include/llvm/Support/xxhash.h
+++ b/llvm/include/llvm/Support/xxhash.h
@@ -40,13 +40,14 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
-uint64_t xxHash64(llvm::StringRef Data);
-uint64_t xxHash64(llvm::ArrayRef<uint8_t> Data);
+LLVM_ABI uint64_t xxHash64(llvm::StringRef Data);
+LLVM_ABI uint64_t xxHash64(llvm::ArrayRef<uint8_t> Data);
-uint64_t xxh3_64bits(ArrayRef<uint8_t> data);
+LLVM_ABI uint64_t xxh3_64bits(ArrayRef<uint8_t> data);
inline uint64_t xxh3_64bits(StringRef data) {
return xxh3_64bits(ArrayRef(data.bytes_begin(), data.size()));
}
@@ -72,7 +73,7 @@ struct XXH128_hash_t {
};
/// XXH3's 128-bit variant.
-XXH128_hash_t xxh3_128bits(ArrayRef<uint8_t> data);
+LLVM_ABI XXH128_hash_t xxh3_128bits(ArrayRef<uint8_t> data);
} // namespace llvm
diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h
index 982cc25..7205617 100644
--- a/llvm/include/llvm/TableGen/Record.h
+++ b/llvm/include/llvm/TableGen/Record.h
@@ -258,7 +258,7 @@ public:
void Profile(FoldingSetNodeID &ID) const;
ArrayRef<const Record *> getClasses() const {
- return ArrayRef(getTrailingObjects<const Record *>(), NumClasses);
+ return getTrailingObjects(NumClasses);
}
using const_record_iterator = const Record *const *;
@@ -632,9 +632,7 @@ public:
const Init *resolveReferences(Resolver &R) const override;
- ArrayRef<const Init *> getBits() const {
- return ArrayRef(getTrailingObjects<const Init *>(), NumBits);
- }
+ ArrayRef<const Init *> getBits() const { return getTrailingObjects(NumBits); }
const Init *getBit(unsigned Bit) const override { return getBits()[Bit]; }
};
@@ -745,9 +743,7 @@ public:
return "[{" + Value.str() + "}]";
}
- std::string getAsUnquotedString() const override {
- return std::string(Value);
- }
+ std::string getAsUnquotedString() const override { return Value.str(); }
const Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off string");
@@ -783,7 +779,7 @@ public:
void Profile(FoldingSetNodeID &ID) const;
ArrayRef<const Init *> getValues() const {
- return ArrayRef(getTrailingObjects<const Init *>(), NumValues);
+ return ArrayRef(getTrailingObjects(), NumValues);
}
const Init *getElement(unsigned Index) const { return getValues()[Index]; }
@@ -1026,10 +1022,6 @@ class CondOpInit final : public TypedInit,
CondOpInit(ArrayRef<const Init *> Conds, ArrayRef<const Init *> Values,
const RecTy *Type);
- size_t numTrailingObjects(OverloadToken<Init *>) const {
- return 2*NumConds;
- }
-
public:
CondOpInit(const CondOpInit &) = delete;
CondOpInit &operator=(const CondOpInit &) = delete;
@@ -1053,11 +1045,11 @@ public:
const Init *getVal(unsigned Num) const { return getVals()[Num]; }
ArrayRef<const Init *> getConds() const {
- return ArrayRef(getTrailingObjects<const Init *>(), NumConds);
+ return getTrailingObjects(NumConds);
}
ArrayRef<const Init *> getVals() const {
- return ArrayRef(getTrailingObjects<const Init *>() + NumConds, NumConds);
+ return ArrayRef(getTrailingObjects() + NumConds, NumConds);
}
const Init *Fold(const Record *CurRec) const;
@@ -1375,7 +1367,7 @@ public:
bool args_empty() const { return NumArgs == 0; }
ArrayRef<const ArgumentInit *> args() const {
- return ArrayRef(getTrailingObjects<const ArgumentInit *>(), NumArgs);
+ return getTrailingObjects(NumArgs);
}
const Init *getBit(unsigned Bit) const override {
@@ -1488,11 +1480,11 @@ public:
}
ArrayRef<const Init *> getArgs() const {
- return ArrayRef(getTrailingObjects<const Init *>(), NumArgs);
+ return getTrailingObjects<const Init *>(NumArgs);
}
ArrayRef<const StringInit *> getArgNames() const {
- return ArrayRef(getTrailingObjects<const StringInit *>(), NumArgs);
+ return getTrailingObjects<const StringInit *>(NumArgs);
}
const Init *resolveReferences(Resolver &R) const override;
diff --git a/llvm/lib/Analysis/DXILResource.cpp b/llvm/lib/Analysis/DXILResource.cpp
index 1602b24..baba7d2 100644
--- a/llvm/lib/Analysis/DXILResource.cpp
+++ b/llvm/lib/Analysis/DXILResource.cpp
@@ -955,9 +955,7 @@ void DXILResourceBindingInfo::populate(Module &M, DXILResourceTypeMap &DRTM) {
// for each binding type and used spaces. Bindings are sorted by resource
// class, space, and lower bound register slot.
BindingSpaces *BS = &SRVSpaces;
- for (unsigned I = 0, E = Bindings.size(); I != E; ++I) {
- Binding &B = Bindings[I];
-
+ for (const Binding &B : Bindings) {
if (BS->RC != B.RC)
// move to the next resource class spaces
BS = &getBindingSpaces(B.RC);
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 438669d..f4e1d63 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -158,12 +158,11 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
// If there is an entry in the map return the SCEV of the pointer with the
// symbolic stride replaced by one.
- DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
- if (SI == PtrToStride.end())
+ const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
+ if (!StrideSCEV)
// For a non-symbolic stride, just return the original expression.
return OrigSCEV;
- const SCEV *StrideSCEV = SI->second;
// Note: This assert is both overly strong and overly weak. The actual
// invariant here is that StrideSCEV should be loop invariant. The only
// such invariant strides we happen to speculate right now are unknowns
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 3f96142..69714a1 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -15867,49 +15867,45 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end()) {
- // If we didn't find the extact ZExt expr in the map, check if there's
- // an entry for a smaller ZExt we can use instead.
- Type *Ty = Expr->getType();
- const SCEV *Op = Expr->getOperand(0);
- unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
- while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
- Bitwidth > Op->getType()->getScalarSizeInBits()) {
- Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
- auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
- auto I = Map.find(NarrowExt);
- if (I != Map.end())
- return SE.getZeroExtendExpr(I->second, Ty);
- Bitwidth = Bitwidth / 2;
- }
+ if (const SCEV *S = Map.lookup(Expr))
+ return S;
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
- Expr);
+      // If we didn't find the exact ZExt expr in the map, check if there's
+ // an entry for a smaller ZExt we can use instead.
+ Type *Ty = Expr->getType();
+ const SCEV *Op = Expr->getOperand(0);
+ unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
+ while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
+ Bitwidth > Op->getType()->getScalarSizeInBits()) {
+ Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
+ auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
+ auto I = Map.find(NarrowExt);
+ if (I != Map.end())
+ return SE.getZeroExtendExpr(I->second, Ty);
+ Bitwidth = Bitwidth / 2;
}
- return I->second;
+
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
+ Expr);
}
const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
- Expr);
- return I->second;
+ if (const SCEV *S = Map.lookup(Expr))
+ return S;
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSignExtendExpr(
+ Expr);
}
const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
- return I->second;
+ if (const SCEV *S = Map.lookup(Expr))
+ return S;
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitUMinExpr(Expr);
}
const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
- auto I = Map.find(Expr);
- if (I == Map.end())
- return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
- return I->second;
+ if (const SCEV *S = Map.lookup(Expr))
+ return S;
+ return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitSMinExpr(Expr);
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 3945dd4..c8b5683 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -205,6 +205,14 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
return;
}
+ // DXIL does not support libcalls, and disabling them here prevents a number
+ // of passes from introducing libcalls into DXIL which would otherwise
+  // complicate lowering/legalization.
+ if (T.isDXIL()) {
+ TLI.disableAllFunctions();
+ return;
+ }
+
// memset_pattern{4,8,16} is only available on iOS 3.0 and Mac OS X 10.5 and
// later. All versions of watchOS support it.
if (T.isMacOSX()) {
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index b7e001d..0f85739 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1395,9 +1395,8 @@ bool TargetTransformInfo::preferAlternateOpcodeVectorization() const {
return TTIImpl->preferAlternateOpcodeVectorization();
}
-bool TargetTransformInfo::preferPredicatedReductionSelect(unsigned Opcode,
- Type *Ty) const {
- return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty);
+bool TargetTransformInfo::preferPredicatedReductionSelect() const {
+ return TTIImpl->preferPredicatedReductionSelect();
}
bool TargetTransformInfo::preferEpilogueVectorization() const {
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 1e07f06..64f9638 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -542,7 +542,7 @@ private:
: Value(Ty, SubclassID), Opcode(Info.Opcode), Flags(Info.Flags),
NumOperands(OpIDs.size()), BlockAddressBB(Info.BlockAddressBB),
SrcElemTy(Info.SrcElemTy), InRange(Info.InRange) {
- llvm::uninitialized_copy(OpIDs, getTrailingObjects<unsigned>());
+ llvm::uninitialized_copy(OpIDs, getTrailingObjects());
}
BitcodeConstant &operator=(const BitcodeConstant &) = delete;
@@ -559,7 +559,7 @@ public:
static bool classof(const Value *V) { return V->getValueID() == SubclassID; }
ArrayRef<unsigned> getOperandIDs() const {
- return ArrayRef(getTrailingObjects<unsigned>(), NumOperands);
+ return ArrayRef(getTrailingObjects(), NumOperands);
}
std::optional<ConstantRange> getInRange() const {
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 158b0a6..1a15c51 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -2659,9 +2659,9 @@ void ModuleBitcodeWriter::writeFunctionMetadataAttachment(const Function &F) {
Record.push_back(VE.getInstructionID(&I));
- for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
- Record.push_back(MDs[i].first);
- Record.push_back(VE.getMetadataID(MDs[i].second));
+ for (const auto &[ID, MD] : MDs) {
+ Record.push_back(ID);
+ Record.push_back(VE.getMetadataID(MD));
}
Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
Record.clear();
@@ -2800,7 +2800,7 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
unsigned(IA->getDialect() & 1) << 2 | unsigned(IA->canThrow()) << 3);
// Add the asm string.
- const std::string &AsmStr = IA->getAsmString();
+ StringRef AsmStr = IA->getAsmString();
Record.push_back(AsmStr.size());
Record.append(AsmStr.begin(), AsmStr.end());
diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index 1fdb808..e133abe 100644
--- a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -485,8 +485,8 @@ ValueEnumerator::ValueEnumerator(const Module &M,
// Enumerate metadata attached with this instruction.
MDs.clear();
I.getAllMetadataOtherThanDebugLoc(MDs);
- for (unsigned i = 0, e = MDs.size(); i != e; ++i)
- EnumerateMetadata(&F, MDs[i].second);
+ for (const auto &MD : MDs)
+ EnumerateMetadata(&F, MD.second);
// Don't enumerate the location directly -- it has a special record
// type -- but enumerate its operands.
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index b331854..010c3d6 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -489,12 +489,17 @@ void DwarfDebug::addSubprogramNames(
if (SP->getName() != "")
addAccelName(Unit, NameTableKind, SP->getName(), Die);
+ // We drop the mangling escape prefix when emitting the DW_AT_linkage_name. So
+ // ensure we don't include it when inserting into the accelerator tables.
+ llvm::StringRef LinkageName =
+ GlobalValue::dropLLVMManglingEscape(SP->getLinkageName());
+
// If the linkage name is different than the name, go ahead and output that as
// well into the name table. Only do that if we are going to actually emit
// that name.
- if (SP->getLinkageName() != "" && SP->getName() != SP->getLinkageName() &&
+ if (LinkageName != "" && SP->getName() != LinkageName &&
(useAllLinkageNames() || InfoHolder.getAbstractScopeDIEs().lookup(SP)))
- addAccelName(Unit, NameTableKind, SP->getLinkageName(), Die);
+ addAccelName(Unit, NameTableKind, LinkageName, Die);
// If this is an Objective-C selector name add it to the ObjC accelerator
// too.
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 5f04e8b..5a1ac5d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -1013,6 +1013,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) {
constructEnumTypeDIE(Buffer, CTy);
break;
case dwarf::DW_TAG_variant_part:
+ case dwarf::DW_TAG_variant:
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
case dwarf::DW_TAG_class_type:
@@ -1066,7 +1067,17 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) {
addDiscriminant(Variant, CI,
DD->isUnsignedDIType(Discriminator->getBaseType()));
}
- constructMemberDIE(Variant, DDTy);
+ // If the variant holds a composite type with tag
+ // DW_TAG_variant, inline those members into the variant
+ // DIE.
+ if (auto *Composite =
+ dyn_cast_or_null<DICompositeType>(DDTy->getBaseType());
+ Composite != nullptr &&
+ Composite->getTag() == dwarf::DW_TAG_variant) {
+ constructTypeDIE(Variant, Composite);
+ } else {
+ constructMemberDIE(Variant, DDTy);
+ }
} else {
constructMemberDIE(Buffer, DDTy);
}
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 81f25b2..fbbbea6 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -297,7 +297,7 @@ bool InlineAsmLowering::lowerInlineAsm(
// Create the MachineInstr, but don't insert it yet since input
// operands still need to insert instructions before this one
auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
- .addExternalSymbol(IA->getAsmString().c_str())
+ .addExternalSymbol(IA->getAsmString().data())
.addImm(ExtraInfo.get());
// Starting from this operand: flag followed by register(s) will be added as
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index fbc0264..59cd0dc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1173,7 +1173,7 @@ bool FastISel::selectCall(const User *I) {
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TargetOpcode::INLINEASM));
- MIB.addExternalSymbol(IA->getAsmString().c_str());
+ MIB.addExternalSymbol(IA->getAsmString().data());
MIB.addImm(ExtraInfo);
const MDNode *SrcLoc = Call->getMetadata("srcloc");
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9d138d3..8e74a07 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -10017,7 +10017,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
std::vector<SDValue> AsmNodeOperands;
AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
- IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
+ IA->getAsmString().data(), TLI.getProgramPointerTy(DAG.getDataLayout())));
// If we have a !srcloc metadata node associated with it, we want to attach
// this to the ultimately generated inline asm machineinstr. To do this, we
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index ba34c72..856e9d8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -4462,11 +4462,14 @@ static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1,
unsigned BitWidth = N0.getScalarValueSizeInBits();
auto *ShAmtC = isConstOrConstSplat(N0.getOperand(2));
- if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))
+ if (!ShAmtC)
+ return SDValue();
+
+ uint64_t ShAmt = ShAmtC->getAPIntValue().urem(BitWidth);
+ if (ShAmt == 0)
return SDValue();
// Canonicalize fshr as fshl to reduce pattern-matching.
- unsigned ShAmt = ShAmtC->getZExtValue();
if (N0.getOpcode() == ISD::FSHR)
ShAmt = BitWidth - ShAmt;
diff --git a/llvm/lib/DebugInfo/GSYM/CMakeLists.txt b/llvm/lib/DebugInfo/GSYM/CMakeLists.txt
index c27d648..724b5b2 100644
--- a/llvm/lib/DebugInfo/GSYM/CMakeLists.txt
+++ b/llvm/lib/DebugInfo/GSYM/CMakeLists.txt
@@ -4,6 +4,7 @@ add_llvm_component_library(LLVMDebugInfoGSYM
FileWriter.cpp
FunctionInfo.cpp
GsymCreator.cpp
+ GsymDIContext.cpp
GsymReader.cpp
InlineInfo.cpp
LineTable.cpp
diff --git a/llvm/lib/DebugInfo/GSYM/GsymDIContext.cpp b/llvm/lib/DebugInfo/GSYM/GsymDIContext.cpp
new file mode 100644
index 0000000..68024a9
--- /dev/null
+++ b/llvm/lib/DebugInfo/GSYM/GsymDIContext.cpp
@@ -0,0 +1,166 @@
+//===-- GsymDIContext.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/GsymDIContext.h"
+
+#include "llvm/DebugInfo/GSYM/GsymReader.h"
+#include "llvm/Support/Path.h"
+
+using namespace llvm;
+using namespace llvm::gsym;
+
+GsymDIContext::GsymDIContext(std::unique_ptr<GsymReader> Reader)
+ : DIContext(CK_GSYM), Reader(std::move(Reader)) {}
+
+void GsymDIContext::dump(raw_ostream &OS, DIDumpOptions DumpOpts) {}
+
+static bool fillLineInfoFromLocation(const SourceLocation &Location,
+ DILineInfoSpecifier Specifier,
+ DILineInfo &LineInfo) {
+ // FIXME Demangle in case of DINameKind::ShortName
+ if (Specifier.FNKind != DINameKind::None) {
+ LineInfo.FunctionName = Location.Name.str();
+ }
+
+ switch (Specifier.FLIKind) {
+ case DILineInfoSpecifier::FileLineInfoKind::RelativeFilePath:
+ // We have no information to determine the relative path, so we fall back to
+ // returning the absolute path.
+ case DILineInfoSpecifier::FileLineInfoKind::RawValue:
+ case DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath:
+ if (Location.Dir.empty()) {
+ if (Location.Base.empty())
+ LineInfo.FileName = DILineInfo::BadString;
+ else
+ LineInfo.FileName = Location.Base.str();
+ } else {
+ SmallString<128> Path(Location.Dir);
+ sys::path::append(Path, Location.Base);
+ LineInfo.FileName = static_cast<std::string>(Path);
+ }
+ break;
+
+ case DILineInfoSpecifier::FileLineInfoKind::BaseNameOnly:
+ LineInfo.FileName = Location.Base.str();
+ break;
+
+ default:
+ return false;
+ }
+ LineInfo.Line = Location.Line;
+
+ // We don't have information in GSYM to fill any of the Source, Column,
+ // StartFileName or StartLine attributes.
+
+ return true;
+}
+
+std::optional<DILineInfo>
+GsymDIContext::getLineInfoForAddress(object::SectionedAddress Address,
+ DILineInfoSpecifier Specifier) {
+ if (Address.SectionIndex != object::SectionedAddress::UndefSection)
+ return {};
+
+ auto ResultOrErr = Reader->lookup(Address.Address);
+
+ if (!ResultOrErr) {
+ consumeError(ResultOrErr.takeError());
+ return {};
+ }
+
+ const auto &Result = *ResultOrErr;
+
+ DILineInfo LineInfo;
+
+ if (Result.Locations.empty()) {
+ // No debug info for this, we just had a symbol from the symbol table.
+
+ // FIXME Demangle in case of DINameKind::ShortName
+ if (Specifier.FNKind != DINameKind::None)
+ LineInfo.FunctionName = Result.FuncName.str();
+ } else if (!fillLineInfoFromLocation(Result.Locations.front(), Specifier,
+ LineInfo))
+ return {};
+
+ LineInfo.StartAddress = Result.FuncRange.start();
+
+ return LineInfo;
+}
+
+std::optional<DILineInfo>
+GsymDIContext::getLineInfoForDataAddress(object::SectionedAddress Address) {
+ // We can't implement this, there's no such information in the GSYM file.
+
+ return {};
+}
+
+DILineInfoTable
+GsymDIContext::getLineInfoForAddressRange(object::SectionedAddress Address,
+ uint64_t Size,
+ DILineInfoSpecifier Specifier) {
+ if (Size == 0)
+ return DILineInfoTable();
+
+ if (Address.SectionIndex != llvm::object::SectionedAddress::UndefSection)
+ return DILineInfoTable();
+
+ if (auto FuncInfoOrErr = Reader->getFunctionInfo(Address.Address)) {
+ DILineInfoTable Table;
+ if (FuncInfoOrErr->OptLineTable) {
+ const gsym::LineTable &LT = *FuncInfoOrErr->OptLineTable;
+ const uint64_t StartAddr = Address.Address;
+ const uint64_t EndAddr = Address.Address + Size;
+ for (const auto &LineEntry : LT) {
+ if (StartAddr <= LineEntry.Addr && LineEntry.Addr < EndAddr) {
+ // Use LineEntry.Addr, LineEntry.File (which is a file index into the
+ // files tables from the GsymReader), and LineEntry.Line (source line
+ // number) to add stuff to the DILineInfoTable
+ }
+ }
+ }
+ return Table;
+ } else {
+ consumeError(FuncInfoOrErr.takeError());
+ return DILineInfoTable();
+ }
+}
+
+DIInliningInfo
+GsymDIContext::getInliningInfoForAddress(object::SectionedAddress Address,
+ DILineInfoSpecifier Specifier) {
+ auto ResultOrErr = Reader->lookup(Address.Address);
+
+ if (!ResultOrErr)
+ return {};
+
+ const auto &Result = *ResultOrErr;
+
+ DIInliningInfo InlineInfo;
+
+ for (const auto &Location : Result.Locations) {
+ DILineInfo LineInfo;
+
+ if (!fillLineInfoFromLocation(Location, Specifier, LineInfo))
+ return {};
+
+ // Hm, that's probably something that should only be filled in the first or
+ // last frame?
+ LineInfo.StartAddress = Result.FuncRange.start();
+
+ InlineInfo.addFrame(LineInfo);
+ }
+
+ return InlineInfo;
+}
+
+std::vector<DILocal>
+GsymDIContext::getLocalsForAddress(object::SectionedAddress Address) {
+ // We can't implement this, there's no such information in the GSYM file.
+
+ return {};
+}
diff --git a/llvm/lib/DebugInfo/Symbolize/CMakeLists.txt b/llvm/lib/DebugInfo/Symbolize/CMakeLists.txt
index 29f62bf..7aef3b0 100644
--- a/llvm/lib/DebugInfo/Symbolize/CMakeLists.txt
+++ b/llvm/lib/DebugInfo/Symbolize/CMakeLists.txt
@@ -10,6 +10,7 @@ add_llvm_component_library(LLVMSymbolize
LINK_COMPONENTS
DebugInfoDWARF
+ DebugInfoGSYM
DebugInfoPDB
DebugInfoBTF
Object
diff --git a/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp b/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp
index 1d8217a..78a1421 100644
--- a/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp
+++ b/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp
@@ -15,6 +15,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/DebugInfo/BTF/BTFContext.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/DebugInfo/GSYM/GsymDIContext.h"
+#include "llvm/DebugInfo/GSYM/GsymReader.h"
#include "llvm/DebugInfo/PDB/PDB.h"
#include "llvm/DebugInfo/PDB/PDBContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h"
@@ -498,6 +500,34 @@ bool LLVMSymbolizer::getOrFindDebugBinary(const ArrayRef<uint8_t> BuildID,
return false;
}
+std::string LLVMSymbolizer::lookUpGsymFile(const std::string &Path) {
+ if (Opts.DisableGsym)
+ return {};
+
+ auto CheckGsymFile = [](const llvm::StringRef &GsymPath) {
+ sys::fs::file_status Status;
+ std::error_code EC = llvm::sys::fs::status(GsymPath, Status);
+ return !EC && !llvm::sys::fs::is_directory(Status);
+ };
+
+ // First, look beside the binary file
+ if (const auto GsymPath = Path + ".gsym"; CheckGsymFile(GsymPath))
+ return GsymPath;
+
+ // Then, look in the directories specified by GsymFileDirectory
+
+ for (const auto &Directory : Opts.GsymFileDirectory) {
+ SmallString<16> GsymPath = llvm::StringRef{Directory};
+ llvm::sys::path::append(GsymPath,
+ llvm::sys::path::filename(Path) + ".gsym");
+
+ if (CheckGsymFile(GsymPath))
+ return static_cast<std::string>(GsymPath);
+ }
+
+ return {};
+}
+
Expected<LLVMSymbolizer::ObjectPair>
LLVMSymbolizer::getOrCreateObjectPair(const std::string &Path,
const std::string &ArchName) {
@@ -634,30 +664,48 @@ LLVMSymbolizer::getOrCreateModuleInfo(StringRef ModuleName) {
std::unique_ptr<DIContext> Context;
// If this is a COFF object containing PDB info and not containing DWARF
// section, use a PDBContext to symbolize. Otherwise, use DWARF.
- if (auto CoffObject = dyn_cast<COFFObjectFile>(Objects.first)) {
- const codeview::DebugInfo *DebugInfo;
- StringRef PDBFileName;
- auto EC = CoffObject->getDebugPDBInfo(DebugInfo, PDBFileName);
- // Use DWARF if there're DWARF sections.
- bool HasDwarf =
- llvm::any_of(Objects.first->sections(), [](SectionRef Section) -> bool {
- if (Expected<StringRef> SectionName = Section.getName())
- return SectionName.get() == ".debug_info";
- return false;
- });
- if (!EC && !HasDwarf && DebugInfo != nullptr && !PDBFileName.empty()) {
- using namespace pdb;
- std::unique_ptr<IPDBSession> Session;
-
- PDB_ReaderType ReaderType =
- Opts.UseDIA ? PDB_ReaderType::DIA : PDB_ReaderType::Native;
- if (auto Err = loadDataForEXE(ReaderType, Objects.first->getFileName(),
- Session)) {
- Modules.emplace(ModuleName, std::unique_ptr<SymbolizableModule>());
- // Return along the PDB filename to provide more context
- return createFileError(PDBFileName, std::move(Err));
+ // Create a DIContext to symbolize as follows:
+ // - If there is a GSYM file, create a GsymDIContext.
+ // - Otherwise, if this is a COFF object containing PDB info, create a
+ // PDBContext.
+ // - Otherwise, create a DWARFContext.
+ const auto GsymFile = lookUpGsymFile(BinaryName.str());
+ if (!GsymFile.empty()) {
+ auto ReaderOrErr = gsym::GsymReader::openFile(GsymFile);
+
+ if (ReaderOrErr) {
+ std::unique_ptr<gsym::GsymReader> Reader =
+ std::make_unique<gsym::GsymReader>(std::move(*ReaderOrErr));
+
+ Context = std::make_unique<gsym::GsymDIContext>(std::move(Reader));
+ }
+ }
+ if (!Context) {
+ if (auto CoffObject = dyn_cast<COFFObjectFile>(Objects.first)) {
+ const codeview::DebugInfo *DebugInfo;
+ StringRef PDBFileName;
+ auto EC = CoffObject->getDebugPDBInfo(DebugInfo, PDBFileName);
+ // Use DWARF if there're DWARF sections.
+ bool HasDwarf = llvm::any_of(
+ Objects.first->sections(), [](SectionRef Section) -> bool {
+ if (Expected<StringRef> SectionName = Section.getName())
+ return SectionName.get() == ".debug_info";
+ return false;
+ });
+ if (!EC && !HasDwarf && DebugInfo != nullptr && !PDBFileName.empty()) {
+ using namespace pdb;
+ std::unique_ptr<IPDBSession> Session;
+
+ PDB_ReaderType ReaderType =
+ Opts.UseDIA ? PDB_ReaderType::DIA : PDB_ReaderType::Native;
+ if (auto Err = loadDataForEXE(ReaderType, Objects.first->getFileName(),
+ Session)) {
+ Modules.emplace(ModuleName, std::unique_ptr<SymbolizableModule>());
+ // Return along the PDB filename to provide more context
+ return createFileError(PDBFileName, std::move(Err));
+ }
+ Context.reset(new PDBContext(*CoffObject, std::move(Session)));
}
- Context.reset(new PDBContext(*CoffObject, std::move(Session)));
}
}
if (!Context)
diff --git a/llvm/lib/Debuginfod/Debuginfod.cpp b/llvm/lib/Debuginfod/Debuginfod.cpp
index db316a1..12f817c 100644
--- a/llvm/lib/Debuginfod/Debuginfod.cpp
+++ b/llvm/lib/Debuginfod/Debuginfod.cpp
@@ -245,8 +245,7 @@ static SmallVector<std::string, 0> getHeaders() {
uint64_t LineNumber = 0;
for (StringRef Line : llvm::split((*HeadersFile)->getBuffer(), '\n')) {
LineNumber++;
- if (!Line.empty() && Line.back() == '\r')
- Line = Line.drop_back();
+ Line.consume_back("\r");
if (!isHeader(Line)) {
if (!all_of(Line, llvm::isSpace))
WithColor::warning()
diff --git a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
index 5351239..cd3c6f8 100644
--- a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
+++ b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
@@ -144,6 +144,22 @@ void DescriptorTableClause::dump(raw_ostream &OS) const {
OS << ", flags = " << Flags << ")";
}
+void dumpRootElements(raw_ostream &OS, ArrayRef<RootElement> Elements) {
+ OS << "RootElements{";
+ bool First = true;
+ for (const RootElement &Element : Elements) {
+ if (!First)
+ OS << ",";
+ OS << " ";
+ First = false;
+ if (const auto &Clause = std::get_if<DescriptorTableClause>(&Element))
+ Clause->dump(OS);
+ if (const auto &Table = std::get_if<DescriptorTable>(&Element))
+ Table->dump(OS);
+ }
+ OS << "}";
+}
+
} // namespace rootsig
} // namespace hlsl
} // namespace llvm
diff --git a/llvm/lib/FuzzMutate/IRMutator.cpp b/llvm/lib/FuzzMutate/IRMutator.cpp
index 7e28f58..672c666 100644
--- a/llvm/lib/FuzzMutate/IRMutator.cpp
+++ b/llvm/lib/FuzzMutate/IRMutator.cpp
@@ -374,7 +374,8 @@ void InsertFunctionStrategy::mutate(BasicBlock &BB, RandomIRBuilder &IB) {
return T->isMetadataTy() || T->isTokenTy();
};
if (!F || IsUnsupportedTy(F->getReturnType()) ||
- any_of(F->getFunctionType()->params(), IsUnsupportedTy)) {
+ any_of(F->getFunctionType()->params(), IsUnsupportedTy) ||
+ !isCallableCC(F->getCallingConv())) {
F = IB.createFunctionDeclaration(*M);
}
@@ -390,6 +391,7 @@ void InsertFunctionStrategy::mutate(BasicBlock &BB, RandomIRBuilder &IB) {
BasicBlock::iterator InsertPt) {
StringRef Name = isRetVoid ? nullptr : "C";
CallInst *Call = CallInst::Create(FTy, F, Srcs, Name, InsertPt);
+ Call->setCallingConv(F->getCallingConv());
// Don't return this call inst if it return void as it can't be sinked.
return isRetVoid ? nullptr : Call;
};
diff --git a/llvm/lib/IR/AttributeImpl.h b/llvm/lib/IR/AttributeImpl.h
index 98d1bad..707c820 100644
--- a/llvm/lib/IR/AttributeImpl.h
+++ b/llvm/lib/IR/AttributeImpl.h
@@ -195,15 +195,12 @@ class StringAttributeImpl final
unsigned KindSize;
unsigned ValSize;
- size_t numTrailingObjects(OverloadToken<char>) const {
- return KindSize + 1 + ValSize + 1;
- }
public:
StringAttributeImpl(StringRef Kind, StringRef Val = StringRef())
: AttributeImpl(StringAttrEntry), KindSize(Kind.size()),
ValSize(Val.size()) {
- char *TrailingString = getTrailingObjects<char>();
+ char *TrailingString = getTrailingObjects();
// Some users rely on zero-termination.
llvm::copy(Kind, TrailingString);
TrailingString[KindSize] = '\0';
@@ -212,10 +209,10 @@ public:
}
StringRef getStringKind() const {
- return StringRef(getTrailingObjects<char>(), KindSize);
+ return StringRef(getTrailingObjects(), KindSize);
}
StringRef getStringValue() const {
- return StringRef(getTrailingObjects<char>() + KindSize + 1, ValSize);
+ return StringRef(getTrailingObjects() + KindSize + 1, ValSize);
}
static size_t totalSizeToAlloc(StringRef Kind, StringRef Val) {
@@ -250,25 +247,22 @@ class ConstantRangeListAttributeImpl final
friend TrailingObjects;
unsigned Size;
- size_t numTrailingObjects(OverloadToken<ConstantRange>) const { return Size; }
public:
ConstantRangeListAttributeImpl(Attribute::AttrKind Kind,
ArrayRef<ConstantRange> Val)
: EnumAttributeImpl(ConstantRangeListAttrEntry, Kind), Size(Val.size()) {
assert(Size > 0);
- ConstantRange *TrailingCR = getTrailingObjects<ConstantRange>();
- llvm::uninitialized_copy(Val, TrailingCR);
+ llvm::uninitialized_copy(Val, getTrailingObjects());
}
~ConstantRangeListAttributeImpl() {
- ConstantRange *TrailingCR = getTrailingObjects<ConstantRange>();
- for (unsigned I = 0; I != Size; ++I)
- TrailingCR[I].~ConstantRange();
+ for (ConstantRange &CR : getTrailingObjects(Size))
+ CR.~ConstantRange();
}
ArrayRef<ConstantRange> getConstantRangeListValue() const {
- return ArrayRef(getTrailingObjects<ConstantRange>(), Size);
+ return getTrailingObjects(Size);
}
static size_t totalSizeToAlloc(ArrayRef<ConstantRange> Val) {
@@ -353,7 +347,7 @@ public:
using iterator = const Attribute *;
- iterator begin() const { return getTrailingObjects<Attribute>(); }
+ iterator begin() const { return getTrailingObjects(); }
iterator end() const { return begin() + NumAttrs; }
void Profile(FoldingSetNodeID &ID) const {
@@ -383,9 +377,6 @@ private:
/// Union of enum attributes available at any index.
AttributeBitSet AvailableSomewhereAttrs;
- // Helper fn for TrailingObjects class.
- size_t numTrailingObjects(OverloadToken<AttributeSet>) { return NumAttrSets; }
-
public:
AttributeListImpl(ArrayRef<AttributeSet> Sets);
@@ -407,7 +398,7 @@ public:
using iterator = const AttributeSet *;
- iterator begin() const { return getTrailingObjects<AttributeSet>(); }
+ iterator begin() const { return getTrailingObjects(); }
iterator end() const { return begin() + NumAttrSets; }
void Profile(FoldingSetNodeID &ID) const;
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index 33ac8bf..5b0ceb3 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -1237,7 +1237,7 @@ LLVM_DUMP_METHOD void AttributeSet::dump() const {
AttributeSetNode::AttributeSetNode(ArrayRef<Attribute> Attrs)
: NumAttrs(Attrs.size()) {
// There's memory after the node where we can store the entries in.
- llvm::copy(Attrs, getTrailingObjects<Attribute>());
+ llvm::copy(Attrs, getTrailingObjects());
for (const auto &I : *this) {
if (I.isStringAttribute())
@@ -1423,7 +1423,7 @@ AttributeListImpl::AttributeListImpl(ArrayRef<AttributeSet> Sets)
assert(!Sets.empty() && "pointless AttributeListImpl");
// There's memory after the node where we can store the entries in.
- llvm::copy(Sets, getTrailingObjects<AttributeSet>());
+ llvm::copy(Sets, getTrailingObjects());
// Initialize AvailableFunctionAttrs and AvailableSomewhereAttrs
// summary bitsets.
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index a3cedcf..1954b44 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -520,10 +520,10 @@ LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, const char *AsmString,
const char *LLVMGetInlineAsmAsmString(LLVMValueRef InlineAsmVal, size_t *Len) {
Value *Val = unwrap<Value>(InlineAsmVal);
- const std::string &AsmString = cast<InlineAsm>(Val)->getAsmString();
+ StringRef AsmString = cast<InlineAsm>(Val)->getAsmString();
- *Len = AsmString.length();
- return AsmString.c_str();
+ *Len = AsmString.size();
+ return AsmString.data();
}
const char *LLVMGetInlineAsmConstraintString(LLVMValueRef InlineAsmVal,
diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp
index d9cc49f..90da9f3 100644
--- a/llvm/lib/IR/DIBuilder.cpp
+++ b/llvm/lib/IR/DIBuilder.cpp
@@ -444,6 +444,19 @@ DIDerivedType *DIBuilder::createVariantMemberType(
std::nullopt, std::nullopt, Flags, getConstantOrNull(Discriminant));
}
+DIDerivedType *DIBuilder::createVariantMemberType(DIScope *Scope,
+ DINodeArray Elements,
+ Constant *Discriminant,
+ DIType *Ty) {
+ auto *V = DICompositeType::get(VMContext, dwarf::DW_TAG_variant, {}, nullptr,
+ 0, getNonCompileUnitScope(Scope), {}, 0, 0, 0,
+ DINode::FlagZero, Elements, 0, {}, nullptr);
+
+ trackIfUnresolved(V);
+ return createVariantMemberType(Scope, {}, nullptr, 0, 0, 0, 0, Discriminant,
+ DINode::FlagZero, V);
+}
+
DIDerivedType *DIBuilder::createBitFieldMemberType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
uint64_t SizeInBits, uint64_t OffsetInBits, uint64_t StorageOffsetInBits,
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 83c1264..2cfd382 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -1343,6 +1343,7 @@ void Verifier::visitDICompositeType(const DICompositeType &N) {
N.getTag() == dwarf::DW_TAG_enumeration_type ||
N.getTag() == dwarf::DW_TAG_class_type ||
N.getTag() == dwarf::DW_TAG_variant_part ||
+ N.getTag() == dwarf::DW_TAG_variant ||
N.getTag() == dwarf::DW_TAG_namelist,
"invalid tag", &N);
diff --git a/llvm/lib/ObjCopy/Archive.cpp b/llvm/lib/ObjCopy/Archive.cpp
index a221c64..a4e90ce 100644
--- a/llvm/lib/ObjCopy/Archive.cpp
+++ b/llvm/lib/ObjCopy/Archive.cpp
@@ -14,13 +14,13 @@
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/SmallVectorMemoryBuffer.h"
-namespace llvm {
-namespace objcopy {
-
+using namespace llvm;
+using namespace llvm::objcopy;
using namespace llvm::object;
Expected<std::vector<NewArchiveMember>>
-createNewArchiveMembers(const MultiFormatConfig &Config, const Archive &Ar) {
+objcopy::createNewArchiveMembers(const MultiFormatConfig &Config,
+ const Archive &Ar) {
std::vector<NewArchiveMember> NewArchiveMembers;
Error Err = Error::success();
for (const Archive::Child &Child : Ar.children(Err)) {
@@ -94,8 +94,8 @@ static Error deepWriteArchive(StringRef ArcName,
return Error::success();
}
-Error executeObjcopyOnArchive(const MultiFormatConfig &Config,
- const object::Archive &Ar) {
+Error objcopy::executeObjcopyOnArchive(const MultiFormatConfig &Config,
+ const object::Archive &Ar) {
Expected<std::vector<NewArchiveMember>> NewArchiveMembersOrErr =
createNewArchiveMembers(Config, Ar);
if (!NewArchiveMembersOrErr)
@@ -107,6 +107,3 @@ Error executeObjcopyOnArchive(const MultiFormatConfig &Config,
Ar.kind(), CommonConfig.DeterministicArchives,
Ar.isThin());
}
-
-} // end namespace objcopy
-} // end namespace llvm
diff --git a/llvm/lib/ObjCopy/CommonConfig.cpp b/llvm/lib/ObjCopy/CommonConfig.cpp
index 1da0240..569e4c3 100644
--- a/llvm/lib/ObjCopy/CommonConfig.cpp
+++ b/llvm/lib/ObjCopy/CommonConfig.cpp
@@ -9,8 +9,8 @@
#include "llvm/ObjCopy/CommonConfig.h"
#include "llvm/Support/Errc.h"
-namespace llvm {
-namespace objcopy {
+using namespace llvm;
+using namespace llvm::objcopy;
Expected<NameOrPattern>
NameOrPattern::create(StringRef Pattern, MatchStyle MS,
@@ -47,6 +47,3 @@ NameOrPattern::create(StringRef Pattern, MatchStyle MS,
}
llvm_unreachable("Unhandled llvm.objcopy.MatchStyle enum");
}
-
-} // end namespace objcopy
-} // end namespace llvm
diff --git a/llvm/lib/ObjCopy/ConfigManager.cpp b/llvm/lib/ObjCopy/ConfigManager.cpp
index 79bbb28..9a81b51 100644
--- a/llvm/lib/ObjCopy/ConfigManager.cpp
+++ b/llvm/lib/ObjCopy/ConfigManager.cpp
@@ -10,8 +10,8 @@
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
-namespace llvm {
-namespace objcopy {
+using namespace llvm;
+using namespace llvm::objcopy;
Expected<const COFFConfig &> ConfigManager::getCOFFConfig() const {
if (!Common.SplitDWO.empty() || !Common.SymbolsPrefix.empty() ||
@@ -107,6 +107,3 @@ Expected<const XCOFFConfig &> ConfigManager::getXCOFFConfig() const {
return XCOFF;
}
-
-} // end namespace objcopy
-} // end namespace llvm
diff --git a/llvm/lib/ObjCopy/ObjCopy.cpp b/llvm/lib/ObjCopy/ObjCopy.cpp
index 54dab11..d9a190d 100644
--- a/llvm/lib/ObjCopy/ObjCopy.cpp
+++ b/llvm/lib/ObjCopy/ObjCopy.cpp
@@ -26,15 +26,13 @@
#include "llvm/Object/Wasm.h"
#include "llvm/Object/XCOFFObjectFile.h"
-namespace llvm {
-namespace objcopy {
-
+using namespace llvm;
using namespace llvm::object;
/// The function executeObjcopyOnBinary does the dispatch based on the format
/// of the input binary (ELF, MachO or COFF).
-Error executeObjcopyOnBinary(const MultiFormatConfig &Config,
- object::Binary &In, raw_ostream &Out) {
+Error objcopy::executeObjcopyOnBinary(const MultiFormatConfig &Config,
+ object::Binary &In, raw_ostream &Out) {
if (auto *ELFBinary = dyn_cast<object::ELFObjectFileBase>(&In)) {
Expected<const ELFConfig &> ELFConfig = Config.getELFConfig();
if (!ELFConfig)
@@ -83,6 +81,3 @@ Error executeObjcopyOnBinary(const MultiFormatConfig &Config,
return createStringError(object_error::invalid_file_type,
"unsupported object file format");
}
-
-} // end namespace objcopy
-} // end namespace llvm
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index a7b9f25..e99649d 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -3262,9 +3262,8 @@ bool IEEEFloat::convertFromStringSpecials(StringRef str) {
return true;
}
- bool IsNegative = str.front() == '-';
+ bool IsNegative = str.consume_front("-");
if (IsNegative) {
- str = str.drop_front();
if (str.size() < MIN_NAME_SIZE)
return false;
@@ -3275,16 +3274,13 @@ bool IEEEFloat::convertFromStringSpecials(StringRef str) {
}
// If we have a 's' (or 'S') prefix, then this is a Signaling NaN.
- bool IsSignaling = str.front() == 's' || str.front() == 'S';
+ bool IsSignaling = str.consume_front_insensitive("s");
if (IsSignaling) {
- str = str.drop_front();
if (str.size() < MIN_NAME_SIZE)
return false;
}
- if (str.starts_with("nan") || str.starts_with("NaN")) {
- str = str.drop_front(3);
-
+ if (str.consume_front("nan") || str.consume_front("NaN")) {
// A NaN without payload.
if (str.empty()) {
makeNaN(IsSignaling, IsNegative);
diff --git a/llvm/lib/Support/Chrono.cpp b/llvm/lib/Support/Chrono.cpp
index 993d200..07a5940 100644
--- a/llvm/lib/Support/Chrono.cpp
+++ b/llvm/lib/Support/Chrono.cpp
@@ -8,6 +8,7 @@
#include "llvm/Support/Chrono.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
@@ -15,12 +16,12 @@ namespace llvm {
using namespace sys;
-const char llvm::detail::unit<std::ratio<3600>>::value[] = "h";
-const char llvm::detail::unit<std::ratio<60>>::value[] = "m";
-const char llvm::detail::unit<std::ratio<1>>::value[] = "s";
-const char llvm::detail::unit<std::milli>::value[] = "ms";
-const char llvm::detail::unit<std::micro>::value[] = "us";
-const char llvm::detail::unit<std::nano>::value[] = "ns";
+LLVM_ABI const char llvm::detail::unit<std::ratio<3600>>::value[] = "h";
+LLVM_ABI const char llvm::detail::unit<std::ratio<60>>::value[] = "m";
+LLVM_ABI const char llvm::detail::unit<std::ratio<1>>::value[] = "s";
+LLVM_ABI const char llvm::detail::unit<std::milli>::value[] = "ms";
+LLVM_ABI const char llvm::detail::unit<std::micro>::value[] = "us";
+LLVM_ABI const char llvm::detail::unit<std::nano>::value[] = "ns";
static inline struct tm getStructTM(TimePoint<> TP) {
struct tm Storage;
diff --git a/llvm/lib/Support/CommandLine.cpp b/llvm/lib/Support/CommandLine.cpp
index f1dd39c..49cefb1 100644
--- a/llvm/lib/Support/CommandLine.cpp
+++ b/llvm/lib/Support/CommandLine.cpp
@@ -29,6 +29,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
@@ -54,18 +55,18 @@ using namespace cl;
//
namespace llvm {
namespace cl {
-template class basic_parser<bool>;
-template class basic_parser<boolOrDefault>;
-template class basic_parser<int>;
-template class basic_parser<long>;
-template class basic_parser<long long>;
-template class basic_parser<unsigned>;
-template class basic_parser<unsigned long>;
-template class basic_parser<unsigned long long>;
-template class basic_parser<double>;
-template class basic_parser<float>;
-template class basic_parser<std::string>;
-template class basic_parser<char>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<bool>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<boolOrDefault>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<int>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<long>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<long long>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<unsigned>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<unsigned long>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<unsigned long long>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<double>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<float>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<std::string>;
+template class LLVM_EXPORT_TEMPLATE basic_parser<char>;
template class opt<unsigned>;
template class opt<int>;
@@ -726,7 +727,7 @@ static Option *getOptionPred(StringRef Name, size_t &Length,
// characters in it (so that the next iteration will not be the empty
// string.
while (OMI == OptionsMap.end() && Name.size() > 1) {
- Name = Name.substr(0, Name.size() - 1); // Chop off the last character.
+ Name = Name.drop_back();
OMI = OptionsMap.find(Name);
if (OMI != OptionsMap.end() && !Pred(OMI->getValue()))
OMI = OptionsMap.end();
diff --git a/llvm/lib/Support/TrieRawHashMap.cpp b/llvm/lib/Support/TrieRawHashMap.cpp
index 11d79a6..bb779fe8 100644
--- a/llvm/lib/Support/TrieRawHashMap.cpp
+++ b/llvm/lib/Support/TrieRawHashMap.cpp
@@ -62,7 +62,7 @@ class TrieSubtrie final
public:
using Slot = LazyAtomicPointer<TrieNode>;
- Slot &get(size_t I) { return getTrailingObjects<Slot>()[I]; }
+ Slot &get(size_t I) { return getTrailingObjects()[I]; }
TrieNode *load(size_t I) { return get(I).load(); }
unsigned size() const { return Size; }
@@ -190,7 +190,7 @@ public:
}
// Get the root which is the trailing object.
- TrieSubtrie *getRoot() { return getTrailingObjects<TrieSubtrie>(); }
+ TrieSubtrie *getRoot() { return getTrailingObjects(); }
static void *operator new(size_t Size) { return ::operator new(Size); }
void operator delete(void *Ptr) { ::operator delete(Ptr); }
diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index f3d54e6..51ed259 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -240,7 +240,7 @@ static void ProfileRecordRecTy(FoldingSetNodeID &ID,
RecordRecTy::RecordRecTy(RecordKeeper &RK, ArrayRef<const Record *> Classes)
: RecTy(RecordRecTyKind, RK), NumClasses(Classes.size()) {
- llvm::uninitialized_copy(Classes, getTrailingObjects<const Record *>());
+ llvm::uninitialized_copy(Classes, getTrailingObjects());
}
const RecordRecTy *RecordRecTy::get(RecordKeeper &RK,
@@ -473,7 +473,7 @@ static void ProfileBitsInit(FoldingSetNodeID &ID,
BitsInit::BitsInit(RecordKeeper &RK, ArrayRef<const Init *> Bits)
: TypedInit(IK_BitsInit, BitsRecTy::get(RK, Bits.size())),
NumBits(Bits.size()) {
- llvm::uninitialized_copy(Bits, getTrailingObjects<const Init *>());
+ llvm::uninitialized_copy(Bits, getTrailingObjects());
}
BitsInit *BitsInit::get(RecordKeeper &RK, ArrayRef<const Init *> Bits) {
@@ -493,7 +493,7 @@ BitsInit *BitsInit::get(RecordKeeper &RK, ArrayRef<const Init *> Bits) {
}
void BitsInit::Profile(FoldingSetNodeID &ID) const {
- ProfileBitsInit(ID, ArrayRef(getTrailingObjects<const Init *>(), NumBits));
+ ProfileBitsInit(ID, getBits());
}
const Init *BitsInit::convertInitializerTo(const RecTy *Ty) const {
@@ -706,7 +706,7 @@ static void ProfileListInit(FoldingSetNodeID &ID, ArrayRef<const Init *> Range,
ListInit::ListInit(ArrayRef<const Init *> Elements, const RecTy *EltTy)
: TypedInit(IK_ListInit, ListRecTy::get(EltTy)),
NumValues(Elements.size()) {
- llvm::uninitialized_copy(Elements, getTrailingObjects<const Init *>());
+ llvm::uninitialized_copy(Elements, getTrailingObjects());
}
const ListInit *ListInit::get(ArrayRef<const Init *> Elements,
@@ -751,8 +751,9 @@ const Init *ListInit::convertInitializerTo(const RecTy *Ty) const {
Elements.push_back(CI);
if (CI != I)
Changed = true;
- } else
+ } else {
return nullptr;
+ }
if (!Changed)
return this;
@@ -1787,22 +1788,21 @@ const Init *TernOpInit::Fold(const Record *CurRec) const {
return Val->getDefInit();
}
if (LHSv && MHSv && RHSv) {
- std::string Val = std::string(RHSv->getName());
+ std::string Val = RHSv->getName().str();
if (LHSv->getAsString() == RHSv->getAsString())
- Val = std::string(MHSv->getName());
+ Val = MHSv->getName().str();
return VarInit::get(Val, getType());
}
if (LHSs && MHSs && RHSs) {
- std::string Val = std::string(RHSs->getValue());
+ std::string Val = RHSs->getValue().str();
std::string::size_type found;
std::string::size_type idx = 0;
while (true) {
- found = Val.find(std::string(LHSs->getValue()), idx);
+ found = Val.find(LHSs->getValue().str(), idx);
if (found == std::string::npos)
break;
- Val.replace(found, LHSs->getValue().size(),
- std::string(MHSs->getValue()));
+ Val.replace(found, LHSs->getValue().size(), MHSs->getValue().str());
idx = found + MHSs->getValue().size();
}
@@ -2417,7 +2417,7 @@ const RecTy *DefInit::getFieldType(const StringInit *FieldName) const {
return nullptr;
}
-std::string DefInit::getAsString() const { return std::string(Def->getName()); }
+std::string DefInit::getAsString() const { return Def->getName().str(); }
static void ProfileVarDefInit(FoldingSetNodeID &ID, const Record *Class,
ArrayRef<const ArgumentInit *> Args) {
@@ -2432,7 +2432,7 @@ VarDefInit::VarDefInit(SMLoc Loc, const Record *Class,
ArrayRef<const ArgumentInit *> Args)
: TypedInit(IK_VarDefInit, RecordRecTy::get(Class)), Loc(Loc), Class(Class),
NumArgs(Args.size()) {
- llvm::uninitialized_copy(Args, getTrailingObjects<const ArgumentInit *>());
+ llvm::uninitialized_copy(Args, getTrailingObjects());
}
const VarDefInit *VarDefInit::get(SMLoc Loc, const Record *Class,
@@ -2616,7 +2616,7 @@ static void ProfileCondOpInit(FoldingSetNodeID &ID,
CondOpInit::CondOpInit(ArrayRef<const Init *> Conds,
ArrayRef<const Init *> Values, const RecTy *Type)
: TypedInit(IK_CondOpInit, Type), NumConds(Conds.size()), ValType(Type) {
- auto *TrailingObjects = getTrailingObjects<const Init *>();
+ const Init **TrailingObjects = getTrailingObjects();
llvm::uninitialized_copy(Conds, TrailingObjects);
llvm::uninitialized_copy(Values, TrailingObjects + NumConds);
}
diff --git a/llvm/lib/TableGen/SetTheory.cpp b/llvm/lib/TableGen/SetTheory.cpp
index fefe03b..80c2a55 100644
--- a/llvm/lib/TableGen/SetTheory.cpp
+++ b/llvm/lib/TableGen/SetTheory.cpp
@@ -191,7 +191,7 @@ struct SequenceOp : public SetTheory::Operator {
std::string Format;
if (const auto *SI = dyn_cast<StringInit>(Expr->arg_begin()[0]))
- Format = std::string(SI->getValue());
+ Format = SI->getValue().str();
else
PrintFatalError(Loc, "Format must be a string: " + Expr->getAsString());
diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp
index 423daf6..87a1fb64 100644
--- a/llvm/lib/TableGen/TGParser.cpp
+++ b/llvm/lib/TableGen/TGParser.cpp
@@ -1928,9 +1928,10 @@ const Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
const auto *Arg2 = cast<TypedInit>(Args[2]);
assert(isa<IntRecTy>(Arg2->getType()));
RHS = Arg2;
- } else
+ } else {
// (start, end, 1)
RHS = IntInit::get(Records, 1);
+ }
}
return TernOpInit::get(TernOpInit::RANGE, LHS, MHS, RHS,
IntRecTy::get(Records)->getListTy())
@@ -1946,7 +1947,7 @@ const Init *TGParser::ParseOperation(Record *CurRec, const RecTy *ItemType) {
const RecTy *Type = nullptr;
tgtok::TokKind LexCode = Lex.getCode();
- Lex.Lex(); // eat the operation
+ Lex.Lex(); // Eat the operation.
switch (LexCode) {
default: llvm_unreachable("Unhandled code!");
case tgtok::XDag:
@@ -4326,7 +4327,7 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
// through its template argument names. Substs contains a substitution
// value for each argument, either the value specified or the default.
// Then we can resolve the template arguments.
- MultiClass *MC = MultiClasses[std::string(Ref.Rec->getName())].get();
+ MultiClass *MC = MultiClasses[Ref.Rec->getName().str()].get();
assert(MC && "Didn't lookup multiclass correctly?");
SubstStack Substs;
diff --git a/llvm/lib/TableGen/TGParser.h b/llvm/lib/TableGen/TGParser.h
index 6094bba..017cc5f 100644
--- a/llvm/lib/TableGen/TGParser.h
+++ b/llvm/lib/TableGen/TGParser.h
@@ -131,7 +131,7 @@ public:
}
void addVar(StringRef Name, const Init *I) {
- bool Ins = Vars.try_emplace(std::string(Name), I).second;
+ bool Ins = Vars.try_emplace(Name.str(), I).second;
(void)Ins;
assert(Ins && "Local variable already exists");
}
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 78ac57e..040662a 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -3619,6 +3619,13 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
if (Reg == BasePointerReg)
SavedRegs.set(Reg);
+ // Don't save manually reserved registers set through +reserve-x#i,
+ // even for callee-saved registers, as per GCC's behavior.
+ if (RegInfo->isUserReservedReg(MF, Reg)) {
+ SavedRegs.reset(Reg);
+ continue;
+ }
+
bool RegUsed = SavedRegs.test(Reg);
unsigned PairedReg = AArch64::NoRegister;
const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 795ac68..ad48be4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5725,8 +5725,8 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
int Pattern) {
- if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
- return DAG.getConstant(1, DL, MVT::nxv1i1);
+ if (Pattern == AArch64SVEPredPattern::all)
+ return DAG.getConstant(1, DL, VT);
return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
DAG.getTargetConstant(Pattern, DL, MVT::i32));
}
@@ -21997,6 +21997,30 @@ SDValue tryLowerPartialReductionToWideAdd(SDNode *N,
return DAG.getNode(TopOpcode, DL, AccVT, BottomNode, ExtOp);
}
+static SDValue combineSVEBitSel(unsigned IID, SDNode *N, SelectionDAG &DAG) {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ SDValue Op1 = N->getOperand(1);
+ SDValue Op2 = N->getOperand(2);
+ SDValue Op3 = N->getOperand(3);
+
+ switch (IID) {
+ default:
+ llvm_unreachable("Called with wrong intrinsic!");
+ case Intrinsic::aarch64_sve_bsl:
+ return DAG.getNode(AArch64ISD::BSP, DL, VT, Op3, Op1, Op2);
+ case Intrinsic::aarch64_sve_bsl1n:
+ return DAG.getNode(AArch64ISD::BSP, DL, VT, Op3, DAG.getNOT(DL, Op1, VT),
+ Op2);
+ case Intrinsic::aarch64_sve_bsl2n:
+ return DAG.getNode(AArch64ISD::BSP, DL, VT, Op3, Op1,
+ DAG.getNOT(DL, Op2, VT));
+ case Intrinsic::aarch64_sve_nbsl:
+ return DAG.getNOT(DL, DAG.getNode(AArch64ISD::BSP, DL, VT, Op3, Op1, Op2),
+ VT);
+ }
+}
+
static SDValue performIntrinsicCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
@@ -22319,6 +22343,11 @@ static SDValue performIntrinsicCombine(SDNode *N,
AArch64CC::LAST_ACTIVE);
case Intrinsic::aarch64_sve_whilelo:
return tryCombineWhileLo(N, DCI, Subtarget);
+ case Intrinsic::aarch64_sve_bsl:
+ case Intrinsic::aarch64_sve_bsl1n:
+ case Intrinsic::aarch64_sve_bsl2n:
+ case Intrinsic::aarch64_sve_nbsl:
+ return combineSVEBitSel(IID, N, DAG);
}
return SDValue();
}
@@ -25030,7 +25059,7 @@ static SDValue foldCSELofLASTB(SDNode *Op, SelectionDAG &DAG) {
if (AnyPred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
AnyPred = AnyPred.getOperand(0);
- if (TruePred != AnyPred && TruePred.getOpcode() != AArch64ISD::PTRUE)
+ if (TruePred != AnyPred && !isAllActivePredicate(DAG, TruePred))
return SDValue();
SDValue LastB = Op->getOperand(0);
@@ -28568,7 +28597,7 @@ static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
}
}
-// Return a PTRUE with active lanes corresponding to the extent of VT.
+// Return a predicate with active lanes corresponding to the extent of VT.
static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
EVT VT) {
assert(VT.isFixedLengthVector() &&
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index bfc6247..b02a907f7 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -1115,6 +1115,7 @@ let RecomputePerFunction = 1 in {
def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
+
// Toggles patterns which aren't beneficial in GlobalISel when we aren't
// optimizing. This allows us to selectively use patterns without impacting
// SelectionDAG's behaviour.
@@ -4038,6 +4039,10 @@ multiclass LoadInsertPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType
ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
}
+// Accept i8 scalar argument in GlobalISel.
+defm : LoadInsertPatterns<load, v16i8, v8i8, nxv16i8, i8,
+ LDRBui, LDURBi, LDRBroW, LDRBroX,
+ ro8, am_indexed8, am_unscaled8, uimm12s1, bsub>;
defm : LoadInsertPatterns<extloadi8, v16i8, v8i8, nxv16i8, i32,
LDRBui, LDURBi, LDRBroW, LDRBroX,
ro8, am_indexed8, am_unscaled8, uimm12s1, bsub>;
@@ -7309,12 +7314,12 @@ multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64, ValueType VTSVE
(VTScal (vector_extract (VT64 V64:$Rn), (i64 imm:$Immn))),
(i64 imm:$Immd))),
(INS V128:$src, imm:$Immd,
- (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
+ (VT128 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub)), imm:$Immn)>;
def : Pat<(VT64 (vector_insert V64:$src,
(VTScal (vector_extract (VT128 V128:$Rn), (i64 imm:$Immn))),
(i64 imm:$Immd))),
- (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
+ (EXTRACT_SUBREG (INS (VT128 (SUBREG_TO_REG (i64 0), V64:$src, dsub)),
imm:$Immd, V128:$Rn, imm:$Immn),
dsub)>;
@@ -7332,6 +7337,8 @@ defm : Neon_INS_elt_pattern<v8bf16, v4bf16, nxv8bf16, bf16, VectorIndexH, INSvi1
defm : Neon_INS_elt_pattern<v4f32, v2f32, nxv4f32, f32, VectorIndexS, INSvi32lane, DUPi32, ssub>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, nxv2f64, f64, VectorIndexD, INSvi64lane, DUPi64, dsub>;
+// Accept i8 scalar argument in GlobalISel.
+defm : Neon_INS_elt_pattern<v16i8, v8i8, nxv16i8, i8, VectorIndexB, INSvi8lane, DUPi8, bsub>;
defm : Neon_INS_elt_pattern<v16i8, v8i8, nxv16i8, i32, VectorIndexB, INSvi8lane, DUPi8, bsub>;
defm : Neon_INS_elt_pattern<v8i16, v4i16, nxv8i16, i32, VectorIndexH, INSvi16lane, DUPi16, hsub>;
defm : Neon_INS_elt_pattern<v4i32, v2i32, nxv4i32, i32, VectorIndexS, INSvi32lane, DUPi32, ssub>;
@@ -8809,6 +8816,8 @@ class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
(STy (scalar_load GPR64sp:$Rn)), (i64 VecIndex:$idx)),
(LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
+// Accept i8 scalar argument in GlobalISel.
+def : Ld1Lane128Pat<load, VectorIndexB, v16i8, i8, LD1i8>;
def : Ld1Lane128Pat<extloadi8, VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
@@ -8882,6 +8891,8 @@ class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
VecIndex:$idx, GPR64sp:$Rn),
dsub)>;
+// Accept i8 scalar argument in GlobalISel.
+def : Ld1Lane64Pat<load, VectorIndexB, v8i8, i8, LD1i8>;
def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 9f242bb..1dc7318 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -519,6 +519,18 @@ AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
}
BitVector
+AArch64RegisterInfo::getUserReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+ for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
+ // ReserveXRegister is set for registers manually reserved
+ // through +reserve-x#i.
+ if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
+ markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
+ }
+ return Reserved;
+}
+
+BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
@@ -551,6 +563,11 @@ bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
return getReservedRegs(MF)[Reg];
}
+bool AArch64RegisterInfo::isUserReservedReg(const MachineFunction &MF,
+ MCRegister Reg) const {
+ return getUserReservedRegs(MF)[Reg];
+}
+
bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
MCRegister Reg) const {
return getStrictlyReservedRegs(MF)[Reg];
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.h b/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
index ddee0d6a..cc94be6 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -35,6 +35,7 @@ public:
}
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const;
+ bool isUserReservedReg(const MachineFunction &MF, MCRegister Reg) const;
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const;
bool isAnyArgRegReserved(const MachineFunction &MF) const;
void emitReservedArgRegCallError(const MachineFunction &MF) const;
@@ -93,6 +94,7 @@ public:
const uint32_t *getWindowsStackProbePreservedMask() const;
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const;
+ BitVector getUserReservedRegs(const MachineFunction &MF) const;
BitVector getReservedRegs(const MachineFunction &MF) const override;
std::optional<std::string>
explainReservedReg(const MachineFunction &MF,
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index bd39467..d6bd59a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -545,12 +545,18 @@ def AArch64umulh : PatFrag<(ops node:$op1, node:$op2),
def AArch64bsl : PatFrags<(ops node:$Op1, node:$Op2, node:$Op3),
- [(int_aarch64_sve_bsl node:$Op1, node:$Op2, node:$Op3),
- (AArch64bsp node:$Op3, node:$Op1, node:$Op2)]>;
+ [(AArch64bsp node:$Op3, node:$Op1, node:$Op2),
+ (or (and node:$Op1, node:$Op3), (and node:$Op2, (vnot node:$Op3)))]>;
-def AArch64nbsl : PatFrags<(ops node:$Op1, node:$Op2, node:$Op3),
- [(int_aarch64_sve_nbsl node:$Op1, node:$Op2, node:$Op3),
- (vnot (AArch64bsp node:$Op3, node:$Op1, node:$Op2))]>;
+def AArch64bsl1n : PatFrag<(ops node:$Op1, node:$Op2, node:$Op3),
+ (AArch64bsl (vnot node:$Op1), node:$Op2, node:$Op3)>;
+
+def AArch64bsl2n : PatFrags<(ops node:$Op1, node:$Op2, node:$Op3),
+ [(AArch64bsl node:$Op1, (vnot node:$Op2), node:$Op3),
+ (or (and node:$Op1, node:$Op3), (vnot (or node:$Op2, node:$Op3)))]>;
+
+def AArch64nbsl : PatFrag<(ops node:$Op1, node:$Op2, node:$Op3),
+ (vnot (AArch64bsl node:$Op1, node:$Op2, node:$Op3))>;
let Predicates = [HasSVE] in {
@@ -3934,8 +3940,8 @@ let Predicates = [HasSVE2_or_SME] in {
defm EOR3_ZZZZ : sve2_int_bitwise_ternary_op<0b000, "eor3", AArch64eor3>;
defm BCAX_ZZZZ : sve2_int_bitwise_ternary_op<0b010, "bcax", AArch64bcax>;
defm BSL_ZZZZ : sve2_int_bitwise_ternary_op<0b001, "bsl", AArch64bsl>;
- defm BSL1N_ZZZZ : sve2_int_bitwise_ternary_op<0b011, "bsl1n", int_aarch64_sve_bsl1n>;
- defm BSL2N_ZZZZ : sve2_int_bitwise_ternary_op<0b101, "bsl2n", int_aarch64_sve_bsl2n>;
+ defm BSL1N_ZZZZ : sve2_int_bitwise_ternary_op<0b011, "bsl1n", AArch64bsl1n>;
+ defm BSL2N_ZZZZ : sve2_int_bitwise_ternary_op<0b101, "bsl2n", AArch64bsl2n>;
defm NBSL_ZZZZ : sve2_int_bitwise_ternary_op<0b111, "nbsl", AArch64nbsl>;
// SVE2 bitwise xor and rotate right by immediate
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index cefb7b9..664c360 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -435,10 +435,7 @@ public:
bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
ElementCount VF) const override;
- bool preferPredicatedReductionSelect(unsigned Opcode,
- Type *Ty) const override {
- return ST->hasSVE();
- }
+ bool preferPredicatedReductionSelect() const override { return ST->hasSVE(); }
InstructionCost
getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index b572f81..5a91773 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -65,6 +65,7 @@ ModulePass *createAMDGPULowerBufferFatPointersPass();
FunctionPass *createSIModeRegisterPass();
FunctionPass *createGCNPreRAOptimizationsLegacyPass();
FunctionPass *createAMDGPUPreloadKernArgPrologLegacyPass();
+ModulePass *createAMDGPUPreloadKernelArgumentsLegacyPass(const TargetMachine *);
struct AMDGPUSimplifyLibCallsPass : PassInfoMixin<AMDGPUSimplifyLibCallsPass> {
AMDGPUSimplifyLibCallsPass() {}
@@ -233,6 +234,9 @@ extern char &GCNRegPressurePrinterID;
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &);
extern char &AMDGPUPreloadKernArgPrologLegacyID;
+void initializeAMDGPUPreloadKernelArgumentsLegacyPass(PassRegistry &);
+extern char &AMDGPUPreloadKernelArgumentsLegacyID;
+
// Passes common to R600 and SI
FunctionPass *createAMDGPUPromoteAlloca();
void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
@@ -347,6 +351,16 @@ public:
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
+class AMDGPUPreloadKernelArgumentsPass
+ : public PassInfoMixin<AMDGPUPreloadKernelArgumentsPass> {
+ const TargetMachine &TM;
+
+public:
+ explicit AMDGPUPreloadKernelArgumentsPass(const TargetMachine &TM) : TM(TM) {}
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
class AMDGPUAnnotateUniformValuesPass
: public PassInfoMixin<AMDGPUAnnotateUniformValuesPass> {
public:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index 78e75f8..433144a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -25,10 +25,6 @@
using namespace llvm;
-static cl::opt<unsigned> KernargPreloadCount(
- "amdgpu-kernarg-preload-count",
- cl::desc("How many kernel arguments to preload onto SGPRs"), cl::init(0));
-
static cl::opt<unsigned> IndirectCallSpecializationThreshold(
"amdgpu-indirect-call-specialization-threshold",
cl::desc(
@@ -1327,21 +1323,6 @@ struct AAAMDGPUNoAGPR
const char AAAMDGPUNoAGPR::ID = 0;
-static void addPreloadKernArgHint(Function &F, TargetMachine &TM) {
- const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
- for (unsigned I = 0;
- I < F.arg_size() &&
- I < std::min(KernargPreloadCount.getValue(), ST.getMaxNumUserSGPRs());
- ++I) {
- Argument &Arg = *F.getArg(I);
- // Check for incompatible attributes.
- if (Arg.hasByRefAttr() || Arg.hasNestAttr())
- break;
-
- Arg.addAttr(Attribute::InReg);
- }
-}
-
static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
AMDGPUAttributorOptions Options,
ThinOrFullLTOPhase LTOPhase) {
@@ -1396,8 +1377,6 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
if (!AMDGPU::isEntryFunctionCC(CC)) {
A.getOrCreateAAFor<AAAMDFlatWorkGroupSize>(IRPosition::function(*F));
A.getOrCreateAAFor<AAAMDWavesPerEU>(IRPosition::function(*F));
- } else if (CC == CallingConv::AMDGPU_KERNEL) {
- addPreloadKernArgHint(*F, TM);
}
for (auto &I : instructions(F)) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index e76396f..f2a2cf4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -59,6 +59,28 @@ static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
return maxnum(Src0, Src1);
}
+enum class KnownIEEEMode { Unknown, On, Off };
+
+/// Return KnownIEEEMode::On if we know if the use context can assume
+/// "amdgpu-ieee"="true" and KnownIEEEMode::Off if we can assume
+/// "amdgpu-ieee"="false".
+static KnownIEEEMode fpenvIEEEMode(const Instruction &I,
+ const GCNSubtarget &ST) {
+ if (!ST.hasIEEEMode()) // Only mode on gfx12
+ return KnownIEEEMode::On;
+
+ const Function *F = I.getFunction();
+ if (!F)
+ return KnownIEEEMode::Unknown;
+
+ Attribute IEEEAttr = F->getFnAttribute("amdgpu-ieee");
+ if (IEEEAttr.isValid())
+ return IEEEAttr.getValueAsBool() ? KnownIEEEMode::On : KnownIEEEMode::Off;
+
+ return AMDGPU::isShader(F->getCallingConv()) ? KnownIEEEMode::Off
+ : KnownIEEEMode::On;
+}
+
// Check if a value can be converted to a 16-bit value without losing
// precision.
// The value is expected to be either a float (IsFloat = true) or an unsigned
@@ -843,9 +865,6 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
case Intrinsic::amdgcn_fmed3: {
- // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
- // for the shader.
-
Value *Src0 = II.getArgOperand(0);
Value *Src1 = II.getArgOperand(1);
Value *Src2 = II.getArgOperand(2);
@@ -855,16 +874,88 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
return IC.replaceInstUsesWith(II, Src);
}
+ if (II.isStrictFP())
+ break;
+
+ // med3 with a nan input acts like
+ // v_min_f32(v_min_f32(s0, s1), s2)
+ //
+ // Signalingness is ignored with ieee=0, so we fold to
+ // minimumnum/maximumnum. With ieee=1, the v_min_f32 acts like llvm.minnum
+ // with signaling nan handling. With ieee=0, like llvm.minimumnum except a
+ // returned signaling nan will not be quieted.
+
+ // ieee=1
+ // s0 snan: s2
+ // s1 snan: s2
+ // s2 snan: qnan
+
+ // s0 qnan: min(s1, s2)
+ // s1 qnan: min(s0, s2)
+ // s2 qnan: min(s0, s1)
+
+ // ieee=0
+ // s0 _nan: min(s1, s2)
+ // s1 _nan: min(s0, s2)
+ // s2 _nan: min(s0, s1)
+
// Checking for NaN before canonicalization provides better fidelity when
// mapping other operations onto fmed3 since the order of operands is
// unchanged.
Value *V = nullptr;
- if (match(Src0, PatternMatch::m_NaN()) || isa<UndefValue>(Src0)) {
- V = IC.Builder.CreateMinNum(Src1, Src2);
- } else if (match(Src1, PatternMatch::m_NaN()) || isa<UndefValue>(Src1)) {
- V = IC.Builder.CreateMinNum(Src0, Src2);
- } else if (match(Src2, PatternMatch::m_NaN()) || isa<UndefValue>(Src2)) {
- V = IC.Builder.CreateMaxNum(Src0, Src1);
+ const APFloat *ConstSrc0 = nullptr;
+ const APFloat *ConstSrc1 = nullptr;
+ const APFloat *ConstSrc2 = nullptr;
+
+ // TODO: Also can fold to 2 operands with infinities.
+ if ((match(Src0, m_APFloat(ConstSrc0)) && ConstSrc0->isNaN()) ||
+ isa<UndefValue>(Src0)) {
+ switch (fpenvIEEEMode(II, *ST)) {
+ case KnownIEEEMode::On:
+ // TODO: If Src2 is snan, does it need quieting?
+ if (ConstSrc0 && ConstSrc0->isSignaling())
+ return IC.replaceInstUsesWith(II, Src2);
+ V = IC.Builder.CreateMinNum(Src1, Src2);
+ break;
+ case KnownIEEEMode::Off:
+ V = IC.Builder.CreateMinimumNum(Src1, Src2);
+ break;
+ case KnownIEEEMode::Unknown:
+ break;
+ }
+ } else if ((match(Src1, m_APFloat(ConstSrc1)) && ConstSrc1->isNaN()) ||
+ isa<UndefValue>(Src1)) {
+ switch (fpenvIEEEMode(II, *ST)) {
+ case KnownIEEEMode::On:
+ // TODO: If Src2 is snan, does it need quieting?
+ if (ConstSrc1 && ConstSrc1->isSignaling())
+ return IC.replaceInstUsesWith(II, Src2);
+
+ V = IC.Builder.CreateMinNum(Src0, Src2);
+ break;
+ case KnownIEEEMode::Off:
+ V = IC.Builder.CreateMinimumNum(Src0, Src2);
+ break;
+ case KnownIEEEMode::Unknown:
+ break;
+ }
+ } else if ((match(Src2, m_APFloat(ConstSrc2)) && ConstSrc2->isNaN()) ||
+ isa<UndefValue>(Src2)) {
+ switch (fpenvIEEEMode(II, *ST)) {
+ case KnownIEEEMode::On:
+ if (ConstSrc2 && ConstSrc2->isSignaling()) {
+ auto *Quieted = ConstantFP::get(II.getType(), ConstSrc2->makeQuiet());
+ return IC.replaceInstUsesWith(II, Quieted);
+ }
+
+ V = IC.Builder.CreateMinNum(Src0, Src1);
+ break;
+ case KnownIEEEMode::Off:
+ V = IC.Builder.CreateMaximumNum(Src0, Src1);
+ break;
+ case KnownIEEEMode::Unknown:
+ break;
+ }
}
if (V) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index a4e6768..dec781d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -27,231 +27,6 @@ using namespace llvm;
namespace {
-class PreloadKernelArgInfo {
-private:
- Function &F;
- const GCNSubtarget &ST;
- unsigned NumFreeUserSGPRs;
-
- enum HiddenArg : unsigned {
- HIDDEN_BLOCK_COUNT_X,
- HIDDEN_BLOCK_COUNT_Y,
- HIDDEN_BLOCK_COUNT_Z,
- HIDDEN_GROUP_SIZE_X,
- HIDDEN_GROUP_SIZE_Y,
- HIDDEN_GROUP_SIZE_Z,
- HIDDEN_REMAINDER_X,
- HIDDEN_REMAINDER_Y,
- HIDDEN_REMAINDER_Z,
- END_HIDDEN_ARGS
- };
-
- // Stores information about a specific hidden argument.
- struct HiddenArgInfo {
- // Offset in bytes from the location in the kernearg segment pointed to by
- // the implicitarg pointer.
- uint8_t Offset;
- // The size of the hidden argument in bytes.
- uint8_t Size;
- // The name of the hidden argument in the kernel signature.
- const char *Name;
- };
-
- static constexpr HiddenArgInfo HiddenArgs[END_HIDDEN_ARGS] = {
- {0, 4, "_hidden_block_count_x"}, {4, 4, "_hidden_block_count_y"},
- {8, 4, "_hidden_block_count_z"}, {12, 2, "_hidden_group_size_x"},
- {14, 2, "_hidden_group_size_y"}, {16, 2, "_hidden_group_size_z"},
- {18, 2, "_hidden_remainder_x"}, {20, 2, "_hidden_remainder_y"},
- {22, 2, "_hidden_remainder_z"}};
-
- static HiddenArg getHiddenArgFromOffset(unsigned Offset) {
- for (unsigned I = 0; I < END_HIDDEN_ARGS; ++I)
- if (HiddenArgs[I].Offset == Offset)
- return static_cast<HiddenArg>(I);
-
- return END_HIDDEN_ARGS;
- }
-
- static Type *getHiddenArgType(LLVMContext &Ctx, HiddenArg HA) {
- if (HA < END_HIDDEN_ARGS)
- return Type::getIntNTy(Ctx, HiddenArgs[HA].Size * 8);
-
- llvm_unreachable("Unexpected hidden argument.");
- }
-
- static const char *getHiddenArgName(HiddenArg HA) {
- if (HA < END_HIDDEN_ARGS) {
- return HiddenArgs[HA].Name;
- }
- llvm_unreachable("Unexpected hidden argument.");
- }
-
- // Clones the function after adding implicit arguments to the argument list
- // and returns the new updated function. Preloaded implicit arguments are
- // added up to and including the last one that will be preloaded, indicated by
- // LastPreloadIndex. Currently preloading is only performed on the totality of
- // sequential data from the kernarg segment including implicit (hidden)
- // arguments. This means that all arguments up to the last preloaded argument
- // will also be preloaded even if that data is unused.
- Function *cloneFunctionWithPreloadImplicitArgs(unsigned LastPreloadIndex) {
- FunctionType *FT = F.getFunctionType();
- LLVMContext &Ctx = F.getParent()->getContext();
- SmallVector<Type *, 16> FTypes(FT->param_begin(), FT->param_end());
- for (unsigned I = 0; I <= LastPreloadIndex; ++I)
- FTypes.push_back(getHiddenArgType(Ctx, HiddenArg(I)));
-
- FunctionType *NFT =
- FunctionType::get(FT->getReturnType(), FTypes, FT->isVarArg());
- Function *NF =
- Function::Create(NFT, F.getLinkage(), F.getAddressSpace(), F.getName());
-
- NF->copyAttributesFrom(&F);
- NF->copyMetadata(&F, 0);
- NF->setIsNewDbgInfoFormat(F.IsNewDbgInfoFormat);
-
- F.getParent()->getFunctionList().insert(F.getIterator(), NF);
- NF->takeName(&F);
- NF->splice(NF->begin(), &F);
-
- Function::arg_iterator NFArg = NF->arg_begin();
- for (Argument &Arg : F.args()) {
- Arg.replaceAllUsesWith(&*NFArg);
- NFArg->takeName(&Arg);
- ++NFArg;
- }
-
- AttrBuilder AB(Ctx);
- AB.addAttribute(Attribute::InReg);
- AB.addAttribute("amdgpu-hidden-argument");
- AttributeList AL = NF->getAttributes();
- for (unsigned I = 0; I <= LastPreloadIndex; ++I) {
- AL = AL.addParamAttributes(Ctx, NFArg->getArgNo(), AB);
- NFArg++->setName(getHiddenArgName(HiddenArg(I)));
- }
-
- NF->setAttributes(AL);
- F.replaceAllUsesWith(NF);
- F.setCallingConv(CallingConv::C);
- F.clearMetadata();
-
- return NF;
- }
-
-public:
- PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
- setInitialFreeUserSGPRsCount();
- }
-
- // Returns the maximum number of user SGPRs that we have available to preload
- // arguments.
- void setInitialFreeUserSGPRsCount() {
- GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);
- NumFreeUserSGPRs = UserSGPRInfo.getNumFreeUserSGPRs();
- }
-
- bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
- uint64_t LastExplicitArgOffset) {
- // Check if this argument may be loaded into the same register as the
- // previous argument.
- if (ArgOffset - LastExplicitArgOffset < 4 &&
- !isAligned(Align(4), ArgOffset))
- return true;
-
- // Pad SGPRs for kernarg alignment.
- ArgOffset = alignDown(ArgOffset, 4);
- unsigned Padding = ArgOffset - LastExplicitArgOffset;
- unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
- unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
- if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
- return false;
-
- NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
- return true;
- }
-
- // Try to allocate SGPRs to preload implicit kernel arguments.
- void tryAllocImplicitArgPreloadSGPRs(uint64_t ImplicitArgsBaseOffset,
- uint64_t LastExplicitArgOffset,
- IRBuilder<> &Builder) {
- Function *ImplicitArgPtr = Intrinsic::getDeclarationIfExists(
- F.getParent(), Intrinsic::amdgcn_implicitarg_ptr);
- if (!ImplicitArgPtr)
- return;
-
- const DataLayout &DL = F.getParent()->getDataLayout();
- // Pair is the load and the load offset.
- SmallVector<std::pair<LoadInst *, unsigned>, 4> ImplicitArgLoads;
- for (auto *U : ImplicitArgPtr->users()) {
- Instruction *CI = dyn_cast<Instruction>(U);
- if (!CI || CI->getParent()->getParent() != &F)
- continue;
-
- for (auto *U : CI->users()) {
- int64_t Offset = 0;
- auto *Load = dyn_cast<LoadInst>(U); // Load from ImplicitArgPtr?
- if (!Load) {
- if (GetPointerBaseWithConstantOffset(U, Offset, DL) != CI)
- continue;
-
- Load = dyn_cast<LoadInst>(*U->user_begin()); // Load from GEP?
- }
-
- if (!Load || !Load->isSimple())
- continue;
-
- // FIXME: Expand to handle 64-bit implicit args and large merged loads.
- LLVMContext &Ctx = F.getParent()->getContext();
- Type *LoadTy = Load->getType();
- HiddenArg HA = getHiddenArgFromOffset(Offset);
- if (HA == END_HIDDEN_ARGS || LoadTy != getHiddenArgType(Ctx, HA))
- continue;
-
- ImplicitArgLoads.push_back(std::make_pair(Load, Offset));
- }
- }
-
- if (ImplicitArgLoads.empty())
- return;
-
- // Allocate loads in order of offset. We need to be sure that the implicit
- // argument can actually be preloaded.
- std::sort(ImplicitArgLoads.begin(), ImplicitArgLoads.end(), less_second());
-
- // If we fail to preload any implicit argument we know we don't have SGPRs
- // to preload any subsequent ones with larger offsets. Find the first
- // argument that we cannot preload.
- auto *PreloadEnd = std::find_if(
- ImplicitArgLoads.begin(), ImplicitArgLoads.end(),
- [&](const std::pair<LoadInst *, unsigned> &Load) {
- unsigned LoadSize = DL.getTypeStoreSize(Load.first->getType());
- unsigned LoadOffset = Load.second;
- if (!tryAllocPreloadSGPRs(LoadSize,
- LoadOffset + ImplicitArgsBaseOffset,
- LastExplicitArgOffset))
- return true;
-
- LastExplicitArgOffset =
- ImplicitArgsBaseOffset + LoadOffset + LoadSize;
- return false;
- });
-
- if (PreloadEnd == ImplicitArgLoads.begin())
- return;
-
- unsigned LastHiddenArgIndex = getHiddenArgFromOffset(PreloadEnd[-1].second);
- Function *NF = cloneFunctionWithPreloadImplicitArgs(LastHiddenArgIndex);
- assert(NF);
- for (const auto *I = ImplicitArgLoads.begin(); I != PreloadEnd; ++I) {
- LoadInst *LoadInst = I->first;
- unsigned LoadOffset = I->second;
- unsigned HiddenArgIndex = getHiddenArgFromOffset(LoadOffset);
- unsigned Index = NF->arg_size() - LastHiddenArgIndex + HiddenArgIndex - 1;
- Argument *Arg = NF->getArg(Index);
- LoadInst->replaceAllUsesWith(Arg);
- }
- }
-};
-
class AMDGPULowerKernelArguments : public FunctionPass {
public:
static char ID;
@@ -311,10 +86,6 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));
uint64_t ExplicitArgOffset = 0;
- // Preloaded kernel arguments must be sequential.
- bool InPreloadSequence = true;
- PreloadKernelArgInfo PreloadInfo(F, ST);
-
for (Argument &Arg : F.args()) {
const bool IsByRef = Arg.hasByRefAttr();
Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
@@ -325,25 +96,10 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
- uint64_t LastExplicitArgOffset = ExplicitArgOffset;
ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
- // Guard against the situation where hidden arguments have already been
- // lowered and added to the kernel function signiture, i.e. in a situation
- // where this pass has run twice.
- if (Arg.hasAttribute("amdgpu-hidden-argument"))
- break;
-
- // Try to preload this argument into user SGPRs.
- if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
- !Arg.getType()->isAggregateType())
- if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
- LastExplicitArgOffset))
- continue;
-
- InPreloadSequence = false;
-
- if (Arg.use_empty())
+ // Skip inreg arguments which should be preloaded.
+ if (Arg.use_empty() || Arg.hasInRegAttr())
continue;
// If this is byval, the loads are already explicit in the function. We just
@@ -483,14 +239,6 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
KernArgSegment->addRetAttr(
Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));
- if (InPreloadSequence) {
- uint64_t ImplicitArgsBaseOffset =
- alignTo(ExplicitArgOffset, ST.getAlignmentForImplicitArgPtr()) +
- BaseOffset;
- PreloadInfo.tryAllocImplicitArgPreloadSGPRs(ImplicitArgsBaseOffset,
- ExplicitArgOffset, Builder);
- }
-
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 98a1147..1345396 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -25,6 +25,7 @@ MODULE_PASS("amdgpu-lower-module-lds", AMDGPULowerModuleLDSPass(*this))
MODULE_PASS("amdgpu-perf-hint",
AMDGPUPerfHintAnalysisPass(
*static_cast<const GCNTargetMachine *>(this)))
+MODULE_PASS("amdgpu-preload-kernel-arguments", AMDGPUPreloadKernelArgumentsPass(*this))
MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass())
MODULE_PASS("amdgpu-remove-incompatible-functions", AMDGPURemoveIncompatibleFunctionsPass(*this))
MODULE_PASS("amdgpu-sw-lower-lds", AMDGPUSwLowerLDSPass(*this))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp
new file mode 100644
index 0000000..c1626b4
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp
@@ -0,0 +1,358 @@
+//===- AMDGPUPreloadKernelArguments.cpp - Preload Kernel Arguments --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This pass preloads kernel arguments into user_data SGPRs before kernel
+/// execution begins. The number of registers available for preloading depends
+/// on the number of free user SGPRs, up to the hardware's maximum limit.
+/// Implicit arguments enabled in the kernel descriptor are allocated first,
+/// followed by SGPRs used for preloaded kernel arguments. (Reference:
+/// https://llvm.org/docs/AMDGPUUsage.html#initial-kernel-execution-state)
+/// Additionally, hidden kernel arguments may be preloaded, in which case they
+/// are appended to the kernel signature after explicit arguments. Preloaded
+/// arguments will be marked with `inreg`.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Pass.h"
+
+#define DEBUG_TYPE "amdgpu-preload-kernel-arguments"
+
+using namespace llvm;
+
+static cl::opt<unsigned> KernargPreloadCount(
+ "amdgpu-kernarg-preload-count",
+ cl::desc("How many kernel arguments to preload onto SGPRs"), cl::init(0));
+
+namespace {
+
+class AMDGPUPreloadKernelArgumentsLegacy : public ModulePass {
+ const GCNTargetMachine *TM;
+
+public:
+ static char ID;
+ explicit AMDGPUPreloadKernelArgumentsLegacy(
+ const GCNTargetMachine *TM = nullptr);
+
+ StringRef getPassName() const override {
+ return "AMDGPU Preload Kernel Arguments";
+ }
+
+ bool runOnModule(Module &M) override;
+};
+
+class PreloadKernelArgInfo {
+private:
+ Function &F;
+ const GCNSubtarget &ST;
+ unsigned NumFreeUserSGPRs;
+
+ enum HiddenArg : unsigned {
+ HIDDEN_BLOCK_COUNT_X,
+ HIDDEN_BLOCK_COUNT_Y,
+ HIDDEN_BLOCK_COUNT_Z,
+ HIDDEN_GROUP_SIZE_X,
+ HIDDEN_GROUP_SIZE_Y,
+ HIDDEN_GROUP_SIZE_Z,
+ HIDDEN_REMAINDER_X,
+ HIDDEN_REMAINDER_Y,
+ HIDDEN_REMAINDER_Z,
+ END_HIDDEN_ARGS
+ };
+
+ // Stores information about a specific hidden argument.
+ struct HiddenArgInfo {
+    // Offset in bytes from the location in the kernarg segment pointed to by
+ // the implicitarg pointer.
+ uint8_t Offset;
+ // The size of the hidden argument in bytes.
+ uint8_t Size;
+ // The name of the hidden argument in the kernel signature.
+ const char *Name;
+ };
+
+ static constexpr HiddenArgInfo HiddenArgs[END_HIDDEN_ARGS] = {
+ {0, 4, "_hidden_block_count_x"}, {4, 4, "_hidden_block_count_y"},
+ {8, 4, "_hidden_block_count_z"}, {12, 2, "_hidden_group_size_x"},
+ {14, 2, "_hidden_group_size_y"}, {16, 2, "_hidden_group_size_z"},
+ {18, 2, "_hidden_remainder_x"}, {20, 2, "_hidden_remainder_y"},
+ {22, 2, "_hidden_remainder_z"}};
+
+ static HiddenArg getHiddenArgFromOffset(unsigned Offset) {
+ for (unsigned I = 0; I < END_HIDDEN_ARGS; ++I)
+ if (HiddenArgs[I].Offset == Offset)
+ return static_cast<HiddenArg>(I);
+
+ return END_HIDDEN_ARGS;
+ }
+
+ static Type *getHiddenArgType(LLVMContext &Ctx, HiddenArg HA) {
+ if (HA < END_HIDDEN_ARGS)
+ return Type::getIntNTy(Ctx, HiddenArgs[HA].Size * 8);
+
+ llvm_unreachable("Unexpected hidden argument.");
+ }
+
+ static const char *getHiddenArgName(HiddenArg HA) {
+ if (HA < END_HIDDEN_ARGS)
+ return HiddenArgs[HA].Name;
+
+ llvm_unreachable("Unexpected hidden argument.");
+ }
+
+ // Clones the function after adding implicit arguments to the argument list
+ // and returns the new updated function. Preloaded implicit arguments are
+ // added up to and including the last one that will be preloaded, indicated by
+ // LastPreloadIndex. Currently preloading is only performed on the totality of
+ // sequential data from the kernarg segment including implicit (hidden)
+ // arguments. This means that all arguments up to the last preloaded argument
+ // will also be preloaded even if that data is unused.
+ Function *cloneFunctionWithPreloadImplicitArgs(unsigned LastPreloadIndex) {
+ FunctionType *FT = F.getFunctionType();
+ LLVMContext &Ctx = F.getParent()->getContext();
+ SmallVector<Type *, 16> FTypes(FT->param_begin(), FT->param_end());
+ for (unsigned I = 0; I <= LastPreloadIndex; ++I)
+ FTypes.push_back(getHiddenArgType(Ctx, HiddenArg(I)));
+
+ FunctionType *NFT =
+ FunctionType::get(FT->getReturnType(), FTypes, FT->isVarArg());
+ Function *NF =
+ Function::Create(NFT, F.getLinkage(), F.getAddressSpace(), F.getName());
+
+ NF->copyAttributesFrom(&F);
+ NF->copyMetadata(&F, 0);
+ NF->setIsNewDbgInfoFormat(F.IsNewDbgInfoFormat);
+
+ F.getParent()->getFunctionList().insert(F.getIterator(), NF);
+ NF->takeName(&F);
+ NF->splice(NF->begin(), &F);
+
+ Function::arg_iterator NFArg = NF->arg_begin();
+ for (Argument &Arg : F.args()) {
+ Arg.replaceAllUsesWith(&*NFArg);
+ NFArg->takeName(&Arg);
+ ++NFArg;
+ }
+
+ AttrBuilder AB(Ctx);
+ AB.addAttribute(Attribute::InReg);
+ AB.addAttribute("amdgpu-hidden-argument");
+ AttributeList AL = NF->getAttributes();
+ for (unsigned I = 0; I <= LastPreloadIndex; ++I) {
+ AL = AL.addParamAttributes(Ctx, NFArg->getArgNo(), AB);
+ NFArg++->setName(getHiddenArgName(HiddenArg(I)));
+ }
+
+ NF->setAttributes(AL);
+ F.replaceAllUsesWith(NF);
+
+ return NF;
+ }
+
+public:
+ PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
+ setInitialFreeUserSGPRsCount();
+ }
+
+ // Returns the maximum number of user SGPRs that we have available to preload
+ // arguments.
+ void setInitialFreeUserSGPRsCount() {
+ GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);
+ NumFreeUserSGPRs = UserSGPRInfo.getNumFreeUserSGPRs();
+ }
+
+ bool canPreloadKernArgAtOffset(uint64_t ExplicitArgOffset) {
+ return ExplicitArgOffset <= NumFreeUserSGPRs * 4;
+ }
+
+ // Try to allocate SGPRs to preload hidden kernel arguments.
+ void
+ tryAllocHiddenArgPreloadSGPRs(uint64_t ImplicitArgsBaseOffset,
+ SmallVectorImpl<Function *> &FunctionsToErase) {
+ Function *ImplicitArgPtr = Intrinsic::getDeclarationIfExists(
+ F.getParent(), Intrinsic::amdgcn_implicitarg_ptr);
+ if (!ImplicitArgPtr)
+ return;
+
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ // Pair is the load and the load offset.
+ SmallVector<std::pair<LoadInst *, unsigned>, 4> ImplicitArgLoads;
+ for (auto *U : ImplicitArgPtr->users()) {
+ Instruction *CI = dyn_cast<Instruction>(U);
+ if (!CI || CI->getParent()->getParent() != &F)
+ continue;
+
+ for (auto *U : CI->users()) {
+ int64_t Offset = 0;
+ auto *Load = dyn_cast<LoadInst>(U); // Load from ImplicitArgPtr?
+ if (!Load) {
+ if (GetPointerBaseWithConstantOffset(U, Offset, DL) != CI)
+ continue;
+
+ Load = dyn_cast<LoadInst>(*U->user_begin()); // Load from GEP?
+ }
+
+ if (!Load || !Load->isSimple())
+ continue;
+
+      // FIXME: Expand to handle merged loads.
+ LLVMContext &Ctx = F.getParent()->getContext();
+ Type *LoadTy = Load->getType();
+ HiddenArg HA = getHiddenArgFromOffset(Offset);
+ if (HA == END_HIDDEN_ARGS || LoadTy != getHiddenArgType(Ctx, HA))
+ continue;
+
+ ImplicitArgLoads.push_back(std::make_pair(Load, Offset));
+ }
+ }
+
+ if (ImplicitArgLoads.empty())
+ return;
+
+ // Allocate loads in order of offset. We need to be sure that the implicit
+ // argument can actually be preloaded.
+ std::sort(ImplicitArgLoads.begin(), ImplicitArgLoads.end(), less_second());
+
+ // If we fail to preload any implicit argument we know we don't have SGPRs
+ // to preload any subsequent ones with larger offsets. Find the first
+ // argument that we cannot preload.
+ auto *PreloadEnd =
+ std::find_if(ImplicitArgLoads.begin(), ImplicitArgLoads.end(),
+ [&](const std::pair<LoadInst *, unsigned> &Load) {
+ unsigned LoadSize =
+ DL.getTypeStoreSize(Load.first->getType());
+ unsigned LoadOffset = Load.second;
+ if (!canPreloadKernArgAtOffset(LoadOffset + LoadSize +
+ ImplicitArgsBaseOffset))
+ return true;
+
+ return false;
+ });
+
+ if (PreloadEnd == ImplicitArgLoads.begin())
+ return;
+
+ unsigned LastHiddenArgIndex = getHiddenArgFromOffset(PreloadEnd[-1].second);
+ Function *NF = cloneFunctionWithPreloadImplicitArgs(LastHiddenArgIndex);
+ assert(NF);
+ FunctionsToErase.push_back(&F);
+ for (const auto *I = ImplicitArgLoads.begin(); I != PreloadEnd; ++I) {
+ LoadInst *LoadInst = I->first;
+ unsigned LoadOffset = I->second;
+ unsigned HiddenArgIndex = getHiddenArgFromOffset(LoadOffset);
+ unsigned Index = NF->arg_size() - LastHiddenArgIndex + HiddenArgIndex - 1;
+ Argument *Arg = NF->getArg(Index);
+ LoadInst->replaceAllUsesWith(Arg);
+ }
+ }
+};
+
+} // end anonymous namespace
+
+char AMDGPUPreloadKernelArgumentsLegacy::ID = 0;
+
+INITIALIZE_PASS(AMDGPUPreloadKernelArgumentsLegacy, DEBUG_TYPE,
+ "AMDGPU Preload Kernel Arguments", false, false)
+
+ModulePass *
+llvm::createAMDGPUPreloadKernelArgumentsLegacyPass(const TargetMachine *TM) {
+ return new AMDGPUPreloadKernelArgumentsLegacy(
+ static_cast<const GCNTargetMachine *>(TM));
+}
+
+AMDGPUPreloadKernelArgumentsLegacy::AMDGPUPreloadKernelArgumentsLegacy(
+ const GCNTargetMachine *TM)
+ : ModulePass(ID), TM(TM) {}
+
+static bool markKernelArgsAsInreg(Module &M, const TargetMachine &TM) {
+ SmallVector<Function *, 4> FunctionsToErase;
+ bool Changed = false;
+ for (auto &F : M) {
+ const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+ if (!ST.hasKernargPreload() ||
+ F.getCallingConv() != CallingConv::AMDGPU_KERNEL)
+ continue;
+
+ PreloadKernelArgInfo PreloadInfo(F, ST);
+ uint64_t ExplicitArgOffset = 0;
+ const DataLayout &DL = F.getDataLayout();
+ const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();
+ unsigned NumPreloadsRequested = KernargPreloadCount;
+ unsigned NumPreloadedExplicitArgs = 0;
+ for (Argument &Arg : F.args()) {
+ // Avoid incompatible attributes and guard against running this pass
+ // twice.
+ //
+ // TODO: Preload byref kernel arguments
+ if (Arg.hasByRefAttr() || Arg.hasNestAttr() ||
+ Arg.hasAttribute("amdgpu-hidden-argument"))
+ break;
+
+ // Inreg may be pre-existing on some arguments, try to preload these.
+ if (NumPreloadsRequested == 0 && !Arg.hasInRegAttr())
+ break;
+
+ // FIXME: Preload aggregates.
+ if (Arg.getType()->isAggregateType())
+ break;
+
+ Type *ArgTy = Arg.getType();
+ Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
+ uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
+ ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
+
+ if (!PreloadInfo.canPreloadKernArgAtOffset(ExplicitArgOffset))
+ break;
+
+ Arg.addAttr(Attribute::InReg);
+ NumPreloadedExplicitArgs++;
+ if (NumPreloadsRequested > 0)
+ NumPreloadsRequested--;
+ }
+
+ // Only try preloading hidden arguments if we can successfully preload the
+ // last explicit argument.
+ if (NumPreloadedExplicitArgs == F.arg_size()) {
+ uint64_t ImplicitArgsBaseOffset =
+ alignTo(ExplicitArgOffset, ST.getAlignmentForImplicitArgPtr()) +
+ BaseOffset;
+ PreloadInfo.tryAllocHiddenArgPreloadSGPRs(ImplicitArgsBaseOffset,
+ FunctionsToErase);
+ }
+
+ Changed |= NumPreloadedExplicitArgs > 0;
+ }
+
+  // Erase the original functions that were replaced with clones when we
+  // updated the kernel signature to preload hidden kernel arguments.
+ for (auto *F : FunctionsToErase)
+ F->eraseFromParent();
+
+ return Changed;
+}
+
+bool AMDGPUPreloadKernelArgumentsLegacy::runOnModule(Module &M) {
+ if (skipModule(M) || !TM)
+ return false;
+
+ return markKernelArgsAsInreg(M, *TM);
+}
+
+PreservedAnalyses
+AMDGPUPreloadKernelArgumentsPass::run(Module &M, ModuleAnalysisManager &AM) {
+ bool Changed = markKernelArgsAsInreg(M, TM);
+ return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index c22b27a..ccb251b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -566,6 +566,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeGCNRegPressurePrinterPass(*PR);
initializeAMDGPUPreloadKernArgPrologLegacyPass(*PR);
initializeAMDGPUWaitSGPRHazardsLegacyPass(*PR);
+ initializeAMDGPUPreloadKernelArgumentsLegacyPass(*PR);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -1321,6 +1322,10 @@ void AMDGPUPassConfig::addIRPasses() {
}
void AMDGPUPassConfig::addCodeGenPrepare() {
+ if (TM->getTargetTriple().isAMDGCN() &&
+ TM->getOptLevel() > CodeGenOptLevel::None)
+ addPass(createAMDGPUPreloadKernelArgumentsLegacyPass(TM));
+
if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
addPass(createAMDGPULowerKernelArgumentsPass());
@@ -2050,6 +2055,9 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
// AMDGPUAnnotateKernelFeaturesPass is missing here, but it will hopefully be
// deleted soon.
+ if (TM.getOptLevel() > CodeGenOptLevel::None)
+ addPass(AMDGPUPreloadKernelArgumentsPass(TM));
+
if (EnableLowerKernelArguments)
addPass(AMDGPULowerKernelArgumentsPass(TM));
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 09a3096..c6d70ee 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -89,6 +89,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUPostLegalizerCombiner.cpp
AMDGPUPreLegalizerCombiner.cpp
AMDGPUPreloadKernArgProlog.cpp
+ AMDGPUPreloadKernelArguments.cpp
AMDGPUPrintfRuntimeBinding.cpp
AMDGPUPromoteAlloca.cpp
AMDGPUPromoteKernelArguments.cpp
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index b71a1fa..2505a67 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -1442,9 +1442,8 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
#include "ARMGenMCPseudoLowering.inc"
void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) {
- // TODOD FIXME: Enable feature predicate checks once all the test pass.
- // ARM_MC::verifyInstructionPredicates(MI->getOpcode(),
- // getSubtargetInfo().getFeatureBits());
+ ARM_MC::verifyInstructionPredicates(MI->getOpcode(),
+ getSubtargetInfo().getFeatureBits());
const DataLayout &DL = getDataLayout();
MCTargetStreamer &TS = *OutStreamer->getTargetStreamer();
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 3569036..6c3a1ae 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -2704,8 +2704,7 @@ bool ARMTTIImpl::preferInLoopReduction(RecurKind Kind, Type *Ty) const {
}
}
-bool ARMTTIImpl::preferPredicatedReductionSelect(unsigned Opcode,
- Type *Ty) const {
+bool ARMTTIImpl::preferPredicatedReductionSelect() const {
if (!ST->hasMVEIntegerOps())
return false;
return true;
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 2ce4496..20a2c59 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -230,8 +230,7 @@ public:
bool preferInLoopReduction(RecurKind Kind, Type *Ty) const override;
- bool preferPredicatedReductionSelect(unsigned Opcode,
- Type *Ty) const override;
+ bool preferPredicatedReductionSelect() const override;
bool shouldExpandReduction(const IntrinsicInst *II) const override {
return false;
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index c73ff83..eca6828 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -220,40 +220,6 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
setMinimumJumpTableEntries(UINT_MAX);
}
-const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
-#define NODE(name) \
- case AVRISD::name: \
- return #name
-
- switch (Opcode) {
- default:
- return nullptr;
- NODE(RET_GLUE);
- NODE(RETI_GLUE);
- NODE(CALL);
- NODE(WRAPPER);
- NODE(LSL);
- NODE(LSLW);
- NODE(LSR);
- NODE(LSRW);
- NODE(ROL);
- NODE(ROR);
- NODE(ASR);
- NODE(ASRW);
- NODE(LSLLOOP);
- NODE(LSRLOOP);
- NODE(ROLLOOP);
- NODE(RORLOOP);
- NODE(ASRLOOP);
- NODE(BRCOND);
- NODE(CMP);
- NODE(CMPC);
- NODE(TST);
- NODE(SELECT_CC);
-#undef NODE
- }
-}
-
EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
EVT VT) const {
assert(!VT.isVector() && "No AVR SetCC type for vectors!");
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.h b/llvm/lib/Target/AVR/AVRISelLowering.h
index cd45444..2ae22b2 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.h
+++ b/llvm/lib/Target/AVR/AVRISelLowering.h
@@ -19,64 +19,6 @@
namespace llvm {
-namespace AVRISD {
-
-/// AVR Specific DAG Nodes
-enum NodeType {
- /// Start the numbering where the builtin ops leave off.
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
- /// Return from subroutine.
- RET_GLUE,
- /// Return from ISR.
- RETI_GLUE,
- /// Represents an abstract call instruction,
- /// which includes a bunch of information.
- CALL,
- /// A wrapper node for TargetConstantPool,
- /// TargetExternalSymbol, and TargetGlobalAddress.
- WRAPPER,
- LSL, ///< Logical shift left.
- LSLBN, ///< Byte logical shift left N bits.
- LSLWN, ///< Word logical shift left N bits.
- LSLHI, ///< Higher 8-bit of word logical shift left.
- LSLW, ///< Wide logical shift left.
- LSR, ///< Logical shift right.
- LSRBN, ///< Byte logical shift right N bits.
- LSRWN, ///< Word logical shift right N bits.
- LSRLO, ///< Lower 8-bit of word logical shift right.
- LSRW, ///< Wide logical shift right.
- ASR, ///< Arithmetic shift right.
- ASRBN, ///< Byte arithmetic shift right N bits.
- ASRWN, ///< Word arithmetic shift right N bits.
- ASRLO, ///< Lower 8-bit of word arithmetic shift right.
- ASRW, ///< Wide arithmetic shift right.
- ROR, ///< Bit rotate right.
- ROL, ///< Bit rotate left.
- LSLLOOP, ///< A loop of single logical shift left instructions.
- LSRLOOP, ///< A loop of single logical shift right instructions.
- ROLLOOP, ///< A loop of single left bit rotate instructions.
- RORLOOP, ///< A loop of single right bit rotate instructions.
- ASRLOOP, ///< A loop of single arithmetic shift right instructions.
- /// AVR conditional branches. Operand 0 is the chain operand, operand 1
- /// is the block to branch if condition is true, operand 2 is the
- /// condition code, and operand 3 is the flag operand produced by a CMP
- /// or TEST instruction.
- BRCOND,
- /// Compare instruction.
- CMP,
- /// Compare with carry instruction.
- CMPC,
- /// Test for zero or minus instruction.
- TST,
- /// Swap Rd[7:4] <-> Rd[3:0].
- SWAP,
- /// Operand 0 and operand 1 are selection variable, operand 2
- /// is condition code and operand 3 is flag operand.
- SELECT_CC
-};
-
-} // end of namespace AVRISD
-
class AVRSubtarget;
class AVRTargetMachine;
@@ -95,8 +37,6 @@ public:
return MVT::i8;
}
- const char *getTargetNodeName(unsigned Opcode) const override;
-
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.td b/llvm/lib/Target/AVR/AVRInstrInfo.td
index b00938c..958e138 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.td
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.td
@@ -32,46 +32,67 @@ def SDT_AVRSelectCC
// AVR Specific Node Definitions
//===----------------------------------------------------------------------===//
+// Return from subroutine.
def AVRretglue : SDNode<"AVRISD::RET_GLUE", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
+// Return from ISR.
def AVRretiglue : SDNode<"AVRISD::RETI_GLUE", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
def AVRcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AVRCallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
def AVRcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AVRCallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+// Represents an abstract call instruction,
+// which includes a bunch of information.
def AVRcall : SDNode<"AVRISD::CALL", SDT_AVRCall,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
+// A wrapper node for TargetConstantPool,
+// TargetExternalSymbol, and TargetGlobalAddress.
def AVRWrapper : SDNode<"AVRISD::WRAPPER", SDT_AVRWrapper>;
+// AVR conditional branches. Operand 0 is the chain operand, operand 1
+// is the block to branch if condition is true, operand 2 is the
+// condition code, and operand 3 is the flag operand produced by a CMP
+// or TEST instruction.
def AVRbrcond
: SDNode<"AVRISD::BRCOND", SDT_AVRBrcond, [SDNPHasChain, SDNPInGlue]>;
+
+// Compare instruction.
def AVRcmp : SDNode<"AVRISD::CMP", SDT_AVRCmp, [SDNPOutGlue]>;
+
+// Compare with carry instruction.
def AVRcmpc : SDNode<"AVRISD::CMPC", SDT_AVRCmp, [SDNPInGlue, SDNPOutGlue]>;
+
+// Test for zero or minus instruction.
def AVRtst : SDNode<"AVRISD::TST", SDT_AVRTst, [SDNPOutGlue]>;
+
+// Operand 0 and operand 1 are selection variable, operand 2
+// is condition code and operand 3 is flag operand.
def AVRselectcc : SDNode<"AVRISD::SELECT_CC", SDT_AVRSelectCC, [SDNPInGlue]>;
// Shift nodes.
-def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>;
-def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>;
-def AVRrol : SDNode<"AVRISD::ROL", SDTIntUnaryOp>;
-def AVRror : SDNode<"AVRISD::ROR", SDTIntUnaryOp>;
-def AVRasr : SDNode<"AVRISD::ASR", SDTIntUnaryOp>;
-def AVRlslhi : SDNode<"AVRISD::LSLHI", SDTIntUnaryOp>;
-def AVRlsrlo : SDNode<"AVRISD::LSRLO", SDTIntUnaryOp>;
-def AVRasrlo : SDNode<"AVRISD::ASRLO", SDTIntUnaryOp>;
-def AVRlslbn : SDNode<"AVRISD::LSLBN", SDTIntBinOp>;
-def AVRlsrbn : SDNode<"AVRISD::LSRBN", SDTIntBinOp>;
-def AVRasrbn : SDNode<"AVRISD::ASRBN", SDTIntBinOp>;
-def AVRlslwn : SDNode<"AVRISD::LSLWN", SDTIntBinOp>;
-def AVRlsrwn : SDNode<"AVRISD::LSRWN", SDTIntBinOp>;
-def AVRasrwn : SDNode<"AVRISD::ASRWN", SDTIntBinOp>;
-def AVRlslw : SDNode<"AVRISD::LSLW", SDTIntShiftPairOp>;
-def AVRlsrw : SDNode<"AVRISD::LSRW", SDTIntShiftPairOp>;
-def AVRasrw : SDNode<"AVRISD::ASRW", SDTIntShiftPairOp>;
+def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>; // Logical shift left.
+def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>; // Logical shift right.
+def AVRrol : SDNode<"AVRISD::ROL", SDTIntUnaryOp>; // Bit rotate left.
+def AVRror : SDNode<"AVRISD::ROR", SDTIntUnaryOp>; // Bit rotate right.
+def AVRasr : SDNode<"AVRISD::ASR", SDTIntUnaryOp>; // Arithmetic shift right.
+def AVRlslhi : SDNode<"AVRISD::LSLHI", SDTIntUnaryOp>; // Higher 8-bit of word logical shift left.
+def AVRlsrlo : SDNode<"AVRISD::LSRLO", SDTIntUnaryOp>; // Lower 8-bit of word logical shift right.
+def AVRasrlo : SDNode<"AVRISD::ASRLO", SDTIntUnaryOp>; // Lower 8-bit of word arithmetic shift right.
+def AVRlslbn : SDNode<"AVRISD::LSLBN", SDTIntBinOp>; // Byte logical shift left N bits.
+def AVRlsrbn : SDNode<"AVRISD::LSRBN", SDTIntBinOp>; // Byte logical shift right N bits.
+def AVRasrbn : SDNode<"AVRISD::ASRBN", SDTIntBinOp>; // Byte arithmetic shift right N bits.
+def AVRlslwn : SDNode<"AVRISD::LSLWN", SDTIntBinOp>; // Word logical shift left N bits.
+def AVRlsrwn : SDNode<"AVRISD::LSRWN", SDTIntBinOp>; // Word logical shift right N bits.
+def AVRasrwn : SDNode<"AVRISD::ASRWN", SDTIntBinOp>; // Word arithmetic shift right N bits.
+def AVRlslw : SDNode<"AVRISD::LSLW", SDTIntShiftPairOp>; // Wide logical shift left.
+def AVRlsrw : SDNode<"AVRISD::LSRW", SDTIntShiftPairOp>; // Wide logical shift right.
+def AVRasrw : SDNode<"AVRISD::ASRW", SDTIntShiftPairOp>; // Wide arithmetic shift right.
// Pseudo shift nodes for non-constant shift amounts.
def AVRlslLoop : SDNode<"AVRISD::LSLLOOP", SDTIntShiftOp>;
diff --git a/libclc/generic/lib/math/half_binary.inc b/llvm/lib/Target/AVR/AVRSelectionDAGInfo.cpp
index adb97f3..5a685ab 100644
--- a/libclc/generic/lib/math/half_binary.inc
+++ b/llvm/lib/Target/AVR/AVRSelectionDAGInfo.cpp
@@ -6,12 +6,14 @@
//
//===----------------------------------------------------------------------===//
-#include <clc/utils.h>
+#include "AVRSelectionDAGInfo.h"
-#define __CLC_HALF_FUNC(x) __CLC_CONCAT(half_, x)
+#define GET_SDNODE_DESC
+#include "AVRGenSDNodeInfo.inc"
-_CLC_OVERLOAD _CLC_DEF __CLC_GENTYPE __CLC_HALF_FUNC(__CLC_FUNC)(__CLC_GENTYPE x, __CLC_GENTYPE y) {
- return __CLC_FUNC(x, y);
-}
+using namespace llvm;
-#undef __CLC_HALF_FUNC
+AVRSelectionDAGInfo::AVRSelectionDAGInfo()
+ : SelectionDAGGenTargetInfo(AVRGenSDNodeInfo) {}
+
+AVRSelectionDAGInfo::~AVRSelectionDAGInfo() = default;
diff --git a/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h b/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h
index 3e7bd57..0edac1e 100644
--- a/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h
+++ b/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h
@@ -15,11 +15,17 @@
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#define GET_SDNODE_ENUM
+#include "AVRGenSDNodeInfo.inc"
+
namespace llvm {
/// Holds information about the AVR instruction selection DAG.
-class AVRSelectionDAGInfo : public SelectionDAGTargetInfo {
+class AVRSelectionDAGInfo : public SelectionDAGGenTargetInfo {
public:
+ AVRSelectionDAGInfo();
+
+ ~AVRSelectionDAGInfo() override;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/AVR/CMakeLists.txt b/llvm/lib/Target/AVR/CMakeLists.txt
index 817ba73..781dac0 100644
--- a/llvm/lib/Target/AVR/CMakeLists.txt
+++ b/llvm/lib/Target/AVR/CMakeLists.txt
@@ -10,6 +10,7 @@ tablegen(LLVM AVRGenDisassemblerTables.inc -gen-disassembler)
tablegen(LLVM AVRGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM AVRGenMCCodeEmitter.inc -gen-emitter)
tablegen(LLVM AVRGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM AVRGenSDNodeInfo.inc -gen-sd-node-info)
tablegen(LLVM AVRGenSubtargetInfo.inc -gen-subtarget)
add_public_tablegen_target(AVRCommonTableGen)
@@ -23,6 +24,7 @@ add_llvm_target(AVRCodeGen
AVRISelLowering.cpp
AVRMCInstLower.cpp
AVRRegisterInfo.cpp
+ AVRSelectionDAGInfo.cpp
AVRShiftExpand.cpp
AVRSubtarget.cpp
AVRTargetMachine.cpp
diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
index db310f5..cff8d63 100644
--- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
+++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
@@ -320,8 +320,7 @@ static Value *expandIsFPClass(CallInst *Orig) {
return RetVal;
}
default:
- report_fatal_error(Twine("Unsupported FPClassTest"),
- /* gen_crash_diag=*/false);
+ reportFatalUsageError("Unsupported FPClassTest");
}
}
diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
index 5ce3c7b..1d79c30 100644
--- a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
+++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
@@ -1974,7 +1974,7 @@ void DXILBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
unsigned(IA->getDialect() & 1) << 2);
// Add the asm string.
- const std::string &AsmStr = IA->getAsmString();
+ StringRef AsmStr = IA->getAsmString();
Record.push_back(AsmStr.size());
Record.append(AsmStr.begin(), AsmStr.end());
diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index bc51b29..f38e7b8 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -107,11 +107,7 @@ namespace {
return !operator==(R);
}
bool operator<(const OffsetRange &R) const {
- if (Min != R.Min)
- return Min < R.Min;
- if (Max != R.Max)
- return Max < R.Max;
- return Align < R.Align;
+ return std::tie(Min, Max, Align) < std::tie(R.Min, R.Max, R.Align);
}
static OffsetRange zero() { return {0, 0, 1}; }
};
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index a0a67be..7404791 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -77,27 +77,7 @@ namespace {
static uint32_t deduce(const Constant *C);
};
- // A representation of a register as it can appear in a MachineOperand,
- // i.e. a pair register:subregister.
-
- // FIXME: Use TargetInstrInfo::RegSubRegPair. Also duplicated in
- // HexagonGenPredicate
- struct RegisterSubReg {
- Register Reg;
- unsigned SubReg;
-
- explicit RegisterSubReg(unsigned R, unsigned SR = 0) : Reg(R), SubReg(SR) {}
- explicit RegisterSubReg(const MachineOperand &MO)
- : Reg(MO.getReg()), SubReg(MO.getSubReg()) {}
-
- void print(const TargetRegisterInfo *TRI = nullptr) const {
- dbgs() << printReg(Reg, TRI, SubReg);
- }
-
- bool operator== (const RegisterSubReg &R) const {
- return (Reg == R.Reg) && (SubReg == R.SubReg);
- }
- };
+ using RegSubRegPair = TargetInstrInfo::RegSubRegPair;
// Lattice cell, based on that was described in the W-Z paper on constant
// propagation.
@@ -312,7 +292,7 @@ namespace {
using CellMap = MachineConstPropagator::CellMap;
virtual bool evaluate(const MachineInstr &MI, const CellMap &Inputs,
CellMap &Outputs) = 0;
- virtual bool evaluate(const RegisterSubReg &R, const LatticeCell &SrcC,
+ virtual bool evaluate(const RegSubRegPair &R, const LatticeCell &SrcC,
LatticeCell &Result) = 0;
virtual bool evaluate(const MachineInstr &BrI, const CellMap &Inputs,
SetVector<const MachineBasicBlock*> &Targets,
@@ -355,17 +335,19 @@ namespace {
// Helper functions.
- bool getCell(const RegisterSubReg &R, const CellMap &Inputs, LatticeCell &RC);
+ bool getCell(const RegSubRegPair &R, const CellMap &Inputs,
+ LatticeCell &RC);
bool constToInt(const Constant *C, APInt &Val) const;
const ConstantInt *intToConst(const APInt &Val) const;
// Compares.
- bool evaluateCMPrr(uint32_t Cmp, const RegisterSubReg &R1, const RegisterSubReg &R2,
- const CellMap &Inputs, bool &Result);
- bool evaluateCMPri(uint32_t Cmp, const RegisterSubReg &R1, const APInt &A2,
- const CellMap &Inputs, bool &Result);
- bool evaluateCMPrp(uint32_t Cmp, const RegisterSubReg &R1, uint64_t Props2,
- const CellMap &Inputs, bool &Result);
+ bool evaluateCMPrr(uint32_t Cmp, const RegSubRegPair &R1,
+ const RegSubRegPair &R2, const CellMap &Inputs,
+ bool &Result);
+ bool evaluateCMPri(uint32_t Cmp, const RegSubRegPair &R1, const APInt &A2,
+ const CellMap &Inputs, bool &Result);
+ bool evaluateCMPrp(uint32_t Cmp, const RegSubRegPair &R1, uint64_t Props2,
+ const CellMap &Inputs, bool &Result);
bool evaluateCMPii(uint32_t Cmp, const APInt &A1, const APInt &A2,
bool &Result);
bool evaluateCMPpi(uint32_t Cmp, uint32_t Props, const APInt &A2,
@@ -373,53 +355,53 @@ namespace {
bool evaluateCMPpp(uint32_t Cmp, uint32_t Props1, uint32_t Props2,
bool &Result);
- bool evaluateCOPY(const RegisterSubReg &R1, const CellMap &Inputs,
- LatticeCell &Result);
+ bool evaluateCOPY(const RegSubRegPair &R1, const CellMap &Inputs,
+ LatticeCell &Result);
// Logical operations.
- bool evaluateANDrr(const RegisterSubReg &R1, const RegisterSubReg &R2,
- const CellMap &Inputs, LatticeCell &Result);
- bool evaluateANDri(const RegisterSubReg &R1, const APInt &A2,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateANDrr(const RegSubRegPair &R1, const RegSubRegPair &R2,
+ const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateANDri(const RegSubRegPair &R1, const APInt &A2,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateANDii(const APInt &A1, const APInt &A2, APInt &Result);
- bool evaluateORrr(const RegisterSubReg &R1, const RegisterSubReg &R2,
- const CellMap &Inputs, LatticeCell &Result);
- bool evaluateORri(const RegisterSubReg &R1, const APInt &A2,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateORrr(const RegSubRegPair &R1, const RegSubRegPair &R2,
+ const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateORri(const RegSubRegPair &R1, const APInt &A2,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateORii(const APInt &A1, const APInt &A2, APInt &Result);
- bool evaluateXORrr(const RegisterSubReg &R1, const RegisterSubReg &R2,
- const CellMap &Inputs, LatticeCell &Result);
- bool evaluateXORri(const RegisterSubReg &R1, const APInt &A2,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateXORrr(const RegSubRegPair &R1, const RegSubRegPair &R2,
+ const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateXORri(const RegSubRegPair &R1, const APInt &A2,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateXORii(const APInt &A1, const APInt &A2, APInt &Result);
// Extensions.
- bool evaluateZEXTr(const RegisterSubReg &R1, unsigned Width, unsigned Bits,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateZEXTr(const RegSubRegPair &R1, unsigned Width, unsigned Bits,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateZEXTi(const APInt &A1, unsigned Width, unsigned Bits,
APInt &Result);
- bool evaluateSEXTr(const RegisterSubReg &R1, unsigned Width, unsigned Bits,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateSEXTr(const RegSubRegPair &R1, unsigned Width, unsigned Bits,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateSEXTi(const APInt &A1, unsigned Width, unsigned Bits,
APInt &Result);
// Leading/trailing bits.
- bool evaluateCLBr(const RegisterSubReg &R1, bool Zeros, bool Ones,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateCLBr(const RegSubRegPair &R1, bool Zeros, bool Ones,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateCLBi(const APInt &A1, bool Zeros, bool Ones, APInt &Result);
- bool evaluateCTBr(const RegisterSubReg &R1, bool Zeros, bool Ones,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateCTBr(const RegSubRegPair &R1, bool Zeros, bool Ones,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateCTBi(const APInt &A1, bool Zeros, bool Ones, APInt &Result);
// Bitfield extract.
- bool evaluateEXTRACTr(const RegisterSubReg &R1, unsigned Width, unsigned Bits,
- unsigned Offset, bool Signed, const CellMap &Inputs,
- LatticeCell &Result);
+ bool evaluateEXTRACTr(const RegSubRegPair &R1, unsigned Width,
+ unsigned Bits, unsigned Offset, bool Signed,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateEXTRACTi(const APInt &A1, unsigned Bits, unsigned Offset,
bool Signed, APInt &Result);
// Vector operations.
- bool evaluateSplatr(const RegisterSubReg &R1, unsigned Bits, unsigned Count,
- const CellMap &Inputs, LatticeCell &Result);
+ bool evaluateSplatr(const RegSubRegPair &R1, unsigned Bits, unsigned Count,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateSplati(const APInt &A1, unsigned Bits, unsigned Count,
APInt &Result);
};
@@ -630,7 +612,7 @@ void MachineConstPropagator::visitPHI(const MachineInstr &PN) {
LLVM_DEBUG(dbgs() << "Visiting FI(" << printMBBReference(*MB) << "): " << PN);
const MachineOperand &MD = PN.getOperand(0);
- RegisterSubReg DefR(MD);
+ RegSubRegPair DefR(getRegSubRegPair(MD));
assert(DefR.Reg.isVirtual());
bool Changed = false;
@@ -657,7 +639,7 @@ Bottomize:
continue;
}
const MachineOperand &SO = PN.getOperand(i);
- RegisterSubReg UseR(SO);
+ RegSubRegPair UseR(getRegSubRegPair(SO));
// If the input is not a virtual register, we don't really know what
// value it holds.
if (!UseR.Reg.isVirtual())
@@ -700,7 +682,7 @@ void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
- RegisterSubReg DefR(MO);
+ RegSubRegPair DefR(getRegSubRegPair(MO));
// Only track virtual registers.
if (!DefR.Reg.isVirtual())
continue;
@@ -1075,8 +1057,8 @@ bool MachineConstPropagator::run(MachineFunction &MF) {
// --------------------------------------------------------------------
// Machine const evaluator.
-bool MachineConstEvaluator::getCell(const RegisterSubReg &R, const CellMap &Inputs,
- LatticeCell &RC) {
+bool MachineConstEvaluator::getCell(const RegSubRegPair &R,
+ const CellMap &Inputs, LatticeCell &RC) {
if (!R.Reg.isVirtual())
return false;
const LatticeCell &L = Inputs.get(R.Reg);
@@ -1101,8 +1083,9 @@ const ConstantInt *MachineConstEvaluator::intToConst(const APInt &Val) const {
return ConstantInt::get(CX, Val);
}
-bool MachineConstEvaluator::evaluateCMPrr(uint32_t Cmp, const RegisterSubReg &R1,
- const RegisterSubReg &R2, const CellMap &Inputs, bool &Result) {
+bool MachineConstEvaluator::evaluateCMPrr(uint32_t Cmp, const RegSubRegPair &R1,
+ const RegSubRegPair &R2,
+ const CellMap &Inputs, bool &Result) {
assert(Inputs.has(R1.Reg) && Inputs.has(R2.Reg));
LatticeCell LS1, LS2;
if (!getCell(R1, Inputs, LS1) || !getCell(R2, Inputs, LS2))
@@ -1140,8 +1123,9 @@ bool MachineConstEvaluator::evaluateCMPrr(uint32_t Cmp, const RegisterSubReg &R1
return IsTrue || IsFalse;
}
-bool MachineConstEvaluator::evaluateCMPri(uint32_t Cmp, const RegisterSubReg &R1,
- const APInt &A2, const CellMap &Inputs, bool &Result) {
+bool MachineConstEvaluator::evaluateCMPri(uint32_t Cmp, const RegSubRegPair &R1,
+ const APInt &A2,
+ const CellMap &Inputs, bool &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS;
if (!getCell(R1, Inputs, LS))
@@ -1167,8 +1151,9 @@ bool MachineConstEvaluator::evaluateCMPri(uint32_t Cmp, const RegisterSubReg &R1
return IsTrue || IsFalse;
}
-bool MachineConstEvaluator::evaluateCMPrp(uint32_t Cmp, const RegisterSubReg &R1,
- uint64_t Props2, const CellMap &Inputs, bool &Result) {
+bool MachineConstEvaluator::evaluateCMPrp(uint32_t Cmp, const RegSubRegPair &R1,
+ uint64_t Props2,
+ const CellMap &Inputs, bool &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS;
if (!getCell(R1, Inputs, LS))
@@ -1360,13 +1345,16 @@ bool MachineConstEvaluator::evaluateCMPpp(uint32_t Cmp, uint32_t Props1,
return false;
}
-bool MachineConstEvaluator::evaluateCOPY(const RegisterSubReg &R1,
- const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateCOPY(const RegSubRegPair &R1,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
return getCell(R1, Inputs, Result);
}
-bool MachineConstEvaluator::evaluateANDrr(const RegisterSubReg &R1,
- const RegisterSubReg &R2, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateANDrr(const RegSubRegPair &R1,
+ const RegSubRegPair &R2,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg) && Inputs.has(R2.Reg));
const LatticeCell &L1 = Inputs.get(R2.Reg);
const LatticeCell &L2 = Inputs.get(R2.Reg);
@@ -1396,8 +1384,10 @@ bool MachineConstEvaluator::evaluateANDrr(const RegisterSubReg &R1,
return !Result.isBottom();
}
-bool MachineConstEvaluator::evaluateANDri(const RegisterSubReg &R1,
- const APInt &A2, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateANDri(const RegSubRegPair &R1,
+ const APInt &A2,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
if (A2 == -1)
return getCell(R1, Inputs, Result);
@@ -1432,8 +1422,10 @@ bool MachineConstEvaluator::evaluateANDii(const APInt &A1,
return true;
}
-bool MachineConstEvaluator::evaluateORrr(const RegisterSubReg &R1,
- const RegisterSubReg &R2, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateORrr(const RegSubRegPair &R1,
+ const RegSubRegPair &R2,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg) && Inputs.has(R2.Reg));
const LatticeCell &L1 = Inputs.get(R2.Reg);
const LatticeCell &L2 = Inputs.get(R2.Reg);
@@ -1463,8 +1455,9 @@ bool MachineConstEvaluator::evaluateORrr(const RegisterSubReg &R1,
return !Result.isBottom();
}
-bool MachineConstEvaluator::evaluateORri(const RegisterSubReg &R1,
- const APInt &A2, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateORri(const RegSubRegPair &R1,
+ const APInt &A2, const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
if (A2 == 0)
return getCell(R1, Inputs, Result);
@@ -1499,8 +1492,10 @@ bool MachineConstEvaluator::evaluateORii(const APInt &A1,
return true;
}
-bool MachineConstEvaluator::evaluateXORrr(const RegisterSubReg &R1,
- const RegisterSubReg &R2, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateXORrr(const RegSubRegPair &R1,
+ const RegSubRegPair &R2,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg) && Inputs.has(R2.Reg));
LatticeCell LS1, LS2;
if (!getCell(R1, Inputs, LS1) || !getCell(R2, Inputs, LS2))
@@ -1528,8 +1523,10 @@ bool MachineConstEvaluator::evaluateXORrr(const RegisterSubReg &R1,
return !Result.isBottom();
}
-bool MachineConstEvaluator::evaluateXORri(const RegisterSubReg &R1,
- const APInt &A2, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateXORri(const RegSubRegPair &R1,
+ const APInt &A2,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS1;
if (!getCell(R1, Inputs, LS1))
@@ -1561,8 +1558,10 @@ bool MachineConstEvaluator::evaluateXORii(const APInt &A1,
return true;
}
-bool MachineConstEvaluator::evaluateZEXTr(const RegisterSubReg &R1, unsigned Width,
- unsigned Bits, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateZEXTr(const RegSubRegPair &R1,
+ unsigned Width, unsigned Bits,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS1;
if (!getCell(R1, Inputs, LS1))
@@ -1592,8 +1591,10 @@ bool MachineConstEvaluator::evaluateZEXTi(const APInt &A1, unsigned Width,
return true;
}
-bool MachineConstEvaluator::evaluateSEXTr(const RegisterSubReg &R1, unsigned Width,
- unsigned Bits, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateSEXTr(const RegSubRegPair &R1,
+ unsigned Width, unsigned Bits,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS1;
if (!getCell(R1, Inputs, LS1))
@@ -1657,8 +1658,9 @@ bool MachineConstEvaluator::evaluateSEXTi(const APInt &A1, unsigned Width,
return true;
}
-bool MachineConstEvaluator::evaluateCLBr(const RegisterSubReg &R1, bool Zeros,
- bool Ones, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateCLBr(const RegSubRegPair &R1, bool Zeros,
+ bool Ones, const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS1;
if (!getCell(R1, Inputs, LS1))
@@ -1692,8 +1694,9 @@ bool MachineConstEvaluator::evaluateCLBi(const APInt &A1, bool Zeros,
return true;
}
-bool MachineConstEvaluator::evaluateCTBr(const RegisterSubReg &R1, bool Zeros,
- bool Ones, const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateCTBr(const RegSubRegPair &R1, bool Zeros,
+ bool Ones, const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS1;
if (!getCell(R1, Inputs, LS1))
@@ -1727,9 +1730,11 @@ bool MachineConstEvaluator::evaluateCTBi(const APInt &A1, bool Zeros,
return true;
}
-bool MachineConstEvaluator::evaluateEXTRACTr(const RegisterSubReg &R1,
- unsigned Width, unsigned Bits, unsigned Offset, bool Signed,
- const CellMap &Inputs, LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateEXTRACTr(const RegSubRegPair &R1,
+ unsigned Width, unsigned Bits,
+ unsigned Offset, bool Signed,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
assert(Bits+Offset <= Width);
LatticeCell LS1;
@@ -1785,9 +1790,10 @@ bool MachineConstEvaluator::evaluateEXTRACTi(const APInt &A1, unsigned Bits,
return true;
}
-bool MachineConstEvaluator::evaluateSplatr(const RegisterSubReg &R1,
- unsigned Bits, unsigned Count, const CellMap &Inputs,
- LatticeCell &Result) {
+bool MachineConstEvaluator::evaluateSplatr(const RegSubRegPair &R1,
+ unsigned Bits, unsigned Count,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(R1.Reg));
LatticeCell LS1;
if (!getCell(R1, Inputs, LS1))
@@ -1835,8 +1841,8 @@ namespace {
bool evaluate(const MachineInstr &MI, const CellMap &Inputs,
CellMap &Outputs) override;
- bool evaluate(const RegisterSubReg &R, const LatticeCell &SrcC,
- LatticeCell &Result) override;
+ bool evaluate(const RegSubRegPair &R, const LatticeCell &SrcC,
+ LatticeCell &Result) override;
bool evaluate(const MachineInstr &BrI, const CellMap &Inputs,
SetVector<const MachineBasicBlock*> &Targets, bool &FallsThru)
override;
@@ -1850,8 +1856,8 @@ namespace {
const MachineOperand &MO);
void replaceWithNop(MachineInstr &MI);
- bool evaluateHexRSEQ32(RegisterSubReg RL, RegisterSubReg RH, const CellMap &Inputs,
- LatticeCell &Result);
+ bool evaluateHexRSEQ32(RegSubRegPair RL, RegSubRegPair RH,
+ const CellMap &Inputs, LatticeCell &Result);
bool evaluateHexCompare(const MachineInstr &MI, const CellMap &Inputs,
CellMap &Outputs);
// This is suitable to be called for compare-and-jump instructions.
@@ -1924,14 +1930,14 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
return false;
unsigned Opc = MI.getOpcode();
- RegisterSubReg DefR(MD);
+ RegSubRegPair DefR(getRegSubRegPair(MD));
assert(!DefR.SubReg);
if (!DefR.Reg.isVirtual())
return false;
if (MI.isCopy()) {
LatticeCell RC;
- RegisterSubReg SrcR(MI.getOperand(1));
+ RegSubRegPair SrcR(getRegSubRegPair(MI.getOperand(1)));
bool Eval = evaluateCOPY(SrcR, Inputs, RC);
if (!Eval)
return false;
@@ -1953,7 +1959,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
const MachineOperand &OpLo = LoIs1 ? MI.getOperand(1) : MI.getOperand(3);
const MachineOperand &OpHi = LoIs1 ? MI.getOperand(3) : MI.getOperand(1);
LatticeCell RC;
- RegisterSubReg SrcRL(OpLo), SrcRH(OpHi);
+ RegSubRegPair SrcRL(getRegSubRegPair(OpLo)), SrcRH(getRegSubRegPair(OpHi));
bool Eval = evaluateHexRSEQ32(SrcRL, SrcRH, Inputs, RC);
if (!Eval)
return false;
@@ -2040,7 +2046,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
int64_t B = MI.getOperand(2).getImm();
assert(B >=0 && B < 32);
APInt A(32, (1ull << B), false);
- RegisterSubReg R(MI.getOperand(1));
+ RegSubRegPair R(getRegSubRegPair(MI.getOperand(1)));
LatticeCell RC = Outputs.get(DefR.Reg);
bool Eval = evaluateORri(R, A, Inputs, RC);
if (!Eval)
@@ -2080,7 +2086,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
using namespace Hexagon;
bool Ones = (Opc == S2_ct1) || (Opc == S2_ct1p);
- RegisterSubReg R1(MI.getOperand(1));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
assert(Inputs.has(R1.Reg));
LatticeCell T;
bool Eval = evaluateCTBr(R1, !Ones, Ones, Inputs, T);
@@ -2112,7 +2118,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
bool OnlyZeros = (Opc == S2_cl0) || (Opc == S2_cl0p);
bool OnlyOnes = (Opc == S2_cl1) || (Opc == S2_cl1p);
- RegisterSubReg R1(MI.getOperand(1));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
assert(Inputs.has(R1.Reg));
LatticeCell T;
bool Eval = evaluateCLBr(R1, !OnlyOnes, !OnlyZeros, Inputs, T);
@@ -2140,7 +2146,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
{
bool Signed = (Opc == Hexagon::S4_extract) ||
(Opc == Hexagon::S4_extractp);
- RegisterSubReg R1(MI.getOperand(1));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
unsigned BW = getRegBitWidth(R1.Reg);
unsigned Bits = MI.getOperand(2).getImm();
unsigned Offset = MI.getOperand(3).getImm();
@@ -2191,8 +2197,9 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
return true;
}
-bool HexagonConstEvaluator::evaluate(const RegisterSubReg &R,
- const LatticeCell &Input, LatticeCell &Result) {
+bool HexagonConstEvaluator::evaluate(const RegSubRegPair &R,
+ const LatticeCell &Input,
+ LatticeCell &Result) {
if (!R.SubReg) {
Result = Input;
return true;
@@ -2282,7 +2289,7 @@ Undetermined:
if (SimpleBranch) {
const MachineOperand &MD = BrI.getOperand(0);
- RegisterSubReg PR(MD);
+ RegSubRegPair PR(getRegSubRegPair(MD));
// If the condition operand has a subregister, this is not something
// we currently recognize.
if (PR.SubReg)
@@ -2505,8 +2512,10 @@ void HexagonConstEvaluator::replaceWithNop(MachineInstr &MI) {
MI.removeOperand(0);
}
-bool HexagonConstEvaluator::evaluateHexRSEQ32(RegisterSubReg RL, RegisterSubReg RH,
- const CellMap &Inputs, LatticeCell &Result) {
+bool HexagonConstEvaluator::evaluateHexRSEQ32(RegSubRegPair RL,
+ RegSubRegPair RH,
+ const CellMap &Inputs,
+ LatticeCell &Result) {
assert(Inputs.has(RL.Reg) && Inputs.has(RH.Reg));
LatticeCell LSL, LSH;
if (!getCell(RL, Inputs, LSL) || !getCell(RH, Inputs, LSH))
@@ -2574,7 +2583,7 @@ bool HexagonConstEvaluator::evaluateHexCompare(const MachineInstr &MI,
if (Computed) {
// Only create a zero/non-zero cell. At this time there isn't really
// much need for specific values.
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
LatticeCell L = Outputs.get(DefR.Reg);
uint32_t P = Result ? ConstantProperties::NonZero
: ConstantProperties::Zero;
@@ -2594,9 +2603,9 @@ bool HexagonConstEvaluator::evaluateHexCompare2(unsigned Opc,
bool Reg1 = Src1.isReg(), Reg2 = Src2.isReg();
bool Imm1 = Src1.isImm(), Imm2 = Src2.isImm();
if (Reg1) {
- RegisterSubReg R1(Src1);
+ RegSubRegPair R1(getRegSubRegPair(Src1));
if (Reg2) {
- RegisterSubReg R2(Src2);
+ RegSubRegPair R2(getRegSubRegPair(Src2));
return evaluateCMPrr(Cmp, R1, R2, Inputs, Result);
} else if (Imm2) {
APInt A2 = getCmpImm(Opc, 2, Src2);
@@ -2605,7 +2614,7 @@ bool HexagonConstEvaluator::evaluateHexCompare2(unsigned Opc,
} else if (Imm1) {
APInt A1 = getCmpImm(Opc, 1, Src1);
if (Reg2) {
- RegisterSubReg R2(Src2);
+ RegSubRegPair R2(getRegSubRegPair(Src2));
uint32_t NegCmp = Comparison::negate(Cmp);
return evaluateCMPri(NegCmp, R2, A1, Inputs, Result);
} else if (Imm2) {
@@ -2624,7 +2633,7 @@ bool HexagonConstEvaluator::evaluateHexLogical(const MachineInstr &MI,
return false;
const MachineOperand &Src1 = MI.getOperand(1);
const MachineOperand &Src2 = MI.getOperand(2);
- RegisterSubReg R1(Src1);
+ RegSubRegPair R1(getRegSubRegPair(Src1));
bool Eval = false;
LatticeCell RC;
switch (Opc) {
@@ -2632,7 +2641,8 @@ bool HexagonConstEvaluator::evaluateHexLogical(const MachineInstr &MI,
return false;
case Hexagon::A2_and:
case Hexagon::A2_andp:
- Eval = evaluateANDrr(R1, RegisterSubReg(Src2), Inputs, RC);
+ Eval =
+ evaluateANDrr(R1, RegSubRegPair(getRegSubRegPair(Src2)), Inputs, RC);
break;
case Hexagon::A2_andir: {
if (!Src2.isImm())
@@ -2643,7 +2653,8 @@ bool HexagonConstEvaluator::evaluateHexLogical(const MachineInstr &MI,
}
case Hexagon::A2_or:
case Hexagon::A2_orp:
- Eval = evaluateORrr(R1, RegisterSubReg(Src2), Inputs, RC);
+ Eval =
+ evaluateORrr(R1, RegSubRegPair(getRegSubRegPair(Src2)), Inputs, RC);
break;
case Hexagon::A2_orir: {
if (!Src2.isImm())
@@ -2654,11 +2665,12 @@ bool HexagonConstEvaluator::evaluateHexLogical(const MachineInstr &MI,
}
case Hexagon::A2_xor:
case Hexagon::A2_xorp:
- Eval = evaluateXORrr(R1, RegisterSubReg(Src2), Inputs, RC);
+ Eval =
+ evaluateXORrr(R1, RegSubRegPair(getRegSubRegPair(Src2)), Inputs, RC);
break;
}
if (Eval) {
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
Outputs.update(DefR.Reg, RC);
}
return Eval;
@@ -2667,7 +2679,7 @@ bool HexagonConstEvaluator::evaluateHexLogical(const MachineInstr &MI,
bool HexagonConstEvaluator::evaluateHexCondMove(const MachineInstr &MI,
const CellMap &Inputs, CellMap &Outputs) {
// Dst0 = Cond1 ? Src2 : Src3
- RegisterSubReg CR(MI.getOperand(1));
+ RegSubRegPair CR(getRegSubRegPair(MI.getOperand(1)));
assert(Inputs.has(CR.Reg));
LatticeCell LS;
if (!getCell(CR, Inputs, LS))
@@ -2682,7 +2694,7 @@ bool HexagonConstEvaluator::evaluateHexCondMove(const MachineInstr &MI,
return false;
const MachineOperand &ValOp = MI.getOperand(TakeOp);
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
LatticeCell RC = Outputs.get(DefR.Reg);
if (ValOp.isImm()) {
@@ -2695,7 +2707,7 @@ bool HexagonConstEvaluator::evaluateHexCondMove(const MachineInstr &MI,
return true;
}
if (ValOp.isReg()) {
- RegisterSubReg R(ValOp);
+ RegSubRegPair R(getRegSubRegPair(ValOp));
const LatticeCell &LR = Inputs.get(R.Reg);
LatticeCell LSR;
if (!evaluate(R, LR, LSR))
@@ -2710,7 +2722,7 @@ bool HexagonConstEvaluator::evaluateHexCondMove(const MachineInstr &MI,
bool HexagonConstEvaluator::evaluateHexExt(const MachineInstr &MI,
const CellMap &Inputs, CellMap &Outputs) {
// Dst0 = ext R1
- RegisterSubReg R1(MI.getOperand(1));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
assert(Inputs.has(R1.Reg));
unsigned Opc = MI.getOpcode();
@@ -2740,7 +2752,7 @@ bool HexagonConstEvaluator::evaluateHexExt(const MachineInstr &MI,
break;
}
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
unsigned BW = getRegBitWidth(DefR.Reg);
LatticeCell RC = Outputs.get(DefR.Reg);
bool Eval = Signed ? evaluateSEXTr(R1, BW, Bits, Inputs, RC)
@@ -2754,8 +2766,8 @@ bool HexagonConstEvaluator::evaluateHexExt(const MachineInstr &MI,
bool HexagonConstEvaluator::evaluateHexVector1(const MachineInstr &MI,
const CellMap &Inputs, CellMap &Outputs) {
// DefR = op R1
- RegisterSubReg DefR(MI.getOperand(0));
- RegisterSubReg R1(MI.getOperand(1));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
assert(Inputs.has(R1.Reg));
LatticeCell RC = Outputs.get(DefR.Reg);
bool Eval;
@@ -2793,7 +2805,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isUse() || MO.isImplicit())
continue;
- RegisterSubReg R(MO);
+ RegSubRegPair R(getRegSubRegPair(MO));
if (!R.Reg.isVirtual())
continue;
HasUse = true;
@@ -2963,10 +2975,10 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
// to DefR += mpyi(R, #imm),
// or DefR -= mpyi(R, #imm).
{
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
assert(!DefR.SubReg);
- RegisterSubReg R2(MI.getOperand(2));
- RegisterSubReg R3(MI.getOperand(3));
+ RegSubRegPair R2(getRegSubRegPair(MI.getOperand(2)));
+ RegSubRegPair R3(getRegSubRegPair(MI.getOperand(3)));
assert(Inputs.has(R2.Reg) && Inputs.has(R3.Reg));
LatticeCell LS2, LS3;
// It is enough to get one of the input cells, since we will only try
@@ -2980,7 +2992,7 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
if (Zero) {
// DefR == R1 (tied operands).
MachineOperand &Acc = MI.getOperand(1);
- RegisterSubReg R1(Acc);
+ RegSubRegPair R1(getRegSubRegPair(Acc));
unsigned NewR = R1.Reg;
if (R1.SubReg) {
// Generate COPY. FIXME: Replace with the register:subregister.
@@ -3027,8 +3039,8 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
case Hexagon::A2_and:
{
- RegisterSubReg R1(MI.getOperand(1));
- RegisterSubReg R2(MI.getOperand(2));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
+ RegSubRegPair R2(getRegSubRegPair(MI.getOperand(2)));
assert(Inputs.has(R1.Reg) && Inputs.has(R2.Reg));
LatticeCell LS1, LS2;
unsigned CopyOf = 0;
@@ -3046,8 +3058,8 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
if (!CopyOf)
return false;
MachineOperand &SO = MI.getOperand(CopyOf);
- RegisterSubReg SR(SO);
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair SR(getRegSubRegPair(SO));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
unsigned NewR = SR.Reg;
if (SR.SubReg) {
const TargetRegisterClass *RC = MRI->getRegClass(DefR.Reg);
@@ -3063,8 +3075,8 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
case Hexagon::A2_or:
{
- RegisterSubReg R1(MI.getOperand(1));
- RegisterSubReg R2(MI.getOperand(2));
+ RegSubRegPair R1(getRegSubRegPair(MI.getOperand(1)));
+ RegSubRegPair R2(getRegSubRegPair(MI.getOperand(2)));
assert(Inputs.has(R1.Reg) && Inputs.has(R2.Reg));
LatticeCell LS1, LS2;
unsigned CopyOf = 0;
@@ -3078,8 +3090,8 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
if (!CopyOf)
return false;
MachineOperand &SO = MI.getOperand(CopyOf);
- RegisterSubReg SR(SO);
- RegisterSubReg DefR(MI.getOperand(0));
+ RegSubRegPair SR(getRegSubRegPair(SO));
+ RegSubRegPair DefR(getRegSubRegPair(MI.getOperand(0)));
unsigned NewR = SR.Reg;
if (SR.SubReg) {
const TargetRegisterClass *RC = MRI->getRegClass(DefR.Reg);
diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
index cbe644e..e0ed917 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -40,38 +40,23 @@ using namespace llvm;
namespace {
- // FIXME: Use TargetInstrInfo::RegSubRegPair
- struct RegisterSubReg {
- Register R;
- unsigned S;
+using RegSubRegPair = TargetInstrInfo::RegSubRegPair;
- RegisterSubReg(unsigned r = 0, unsigned s = 0) : R(r), S(s) {}
- RegisterSubReg(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {}
- RegisterSubReg(const Register &Reg) : R(Reg), S(0) {}
+struct PrintRegister {
+ friend raw_ostream &operator<<(raw_ostream &OS, const PrintRegister &PR);
- bool operator== (const RegisterSubReg &Reg) const {
- return R == Reg.R && S == Reg.S;
- }
-
- bool operator< (const RegisterSubReg &Reg) const {
- return R < Reg.R || (R == Reg.R && S < Reg.S);
- }
- };
-
- struct PrintRegister {
- friend raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR);
+ PrintRegister(RegSubRegPair R, const TargetRegisterInfo &I)
+ : Reg(R), TRI(I) {}
- PrintRegister(RegisterSubReg R, const TargetRegisterInfo &I) : Reg(R), TRI(I) {}
-
- private:
- RegisterSubReg Reg;
- const TargetRegisterInfo &TRI;
- };
+private:
+ RegSubRegPair Reg;
+ const TargetRegisterInfo &TRI;
+};
raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR)
LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR) {
- return OS << printReg(PR.Reg.R, &PR.TRI, PR.Reg.S);
+ return OS << printReg(PR.Reg.Reg, &PR.TRI, PR.Reg.SubReg);
}
class HexagonGenPredicate : public MachineFunctionPass {
@@ -94,8 +79,8 @@ namespace {
private:
using VectOfInst = SetVector<MachineInstr *>;
- using SetOfReg = std::set<RegisterSubReg>;
- using RegToRegMap = std::map<RegisterSubReg, RegisterSubReg>;
+ using SetOfReg = SetVector<RegSubRegPair>;
+ using RegToRegMap = DenseMap<RegSubRegPair, RegSubRegPair>;
const HexagonInstrInfo *TII = nullptr;
const HexagonRegisterInfo *TRI = nullptr;
@@ -106,12 +91,12 @@ namespace {
bool isPredReg(Register R);
void collectPredicateGPR(MachineFunction &MF);
- void processPredicateGPR(const RegisterSubReg &Reg);
+ void processPredicateGPR(const RegSubRegPair &Reg);
unsigned getPredForm(unsigned Opc);
bool isConvertibleToPredForm(const MachineInstr *MI);
bool isScalarCmp(unsigned Opc);
- bool isScalarPred(RegisterSubReg PredReg);
- RegisterSubReg getPredRegFor(const RegisterSubReg &Reg);
+ bool isScalarPred(RegSubRegPair PredReg);
+ RegSubRegPair getPredRegFor(const RegSubRegPair &Reg);
bool convertToPredForm(MachineInstr *MI);
bool eliminatePredCopies(MachineFunction &MF);
};
@@ -204,8 +189,8 @@ void HexagonGenPredicate::collectPredicateGPR(MachineFunction &MF) {
case Hexagon::C2_tfrpr:
case TargetOpcode::COPY:
if (isPredReg(MI.getOperand(1).getReg())) {
- RegisterSubReg RD = MI.getOperand(0);
- if (RD.R.isVirtual())
+ RegSubRegPair RD = getRegSubRegPair(MI.getOperand(0));
+ if (RD.Reg.isVirtual())
PredGPRs.insert(RD);
}
break;
@@ -214,14 +199,16 @@ void HexagonGenPredicate::collectPredicateGPR(MachineFunction &MF) {
}
}
-void HexagonGenPredicate::processPredicateGPR(const RegisterSubReg &Reg) {
- LLVM_DEBUG(dbgs() << __func__ << ": " << printReg(Reg.R, TRI, Reg.S) << "\n");
+void HexagonGenPredicate::processPredicateGPR(const RegSubRegPair &Reg) {
+ LLVM_DEBUG(dbgs() << __func__ << ": " << printReg(Reg.Reg, TRI, Reg.SubReg)
+ << "\n");
using use_iterator = MachineRegisterInfo::use_iterator;
- use_iterator I = MRI->use_begin(Reg.R), E = MRI->use_end();
+ use_iterator I = MRI->use_begin(Reg.Reg), E = MRI->use_end();
if (I == E) {
- LLVM_DEBUG(dbgs() << "Dead reg: " << printReg(Reg.R, TRI, Reg.S) << '\n');
- MachineInstr *DefI = MRI->getVRegDef(Reg.R);
+ LLVM_DEBUG(dbgs() << "Dead reg: " << printReg(Reg.Reg, TRI, Reg.SubReg)
+ << '\n');
+ MachineInstr *DefI = MRI->getVRegDef(Reg.Reg);
DefI->eraseFromParent();
return;
}
@@ -233,22 +220,22 @@ void HexagonGenPredicate::processPredicateGPR(const RegisterSubReg &Reg) {
}
}
-RegisterSubReg HexagonGenPredicate::getPredRegFor(const RegisterSubReg &Reg) {
+RegSubRegPair HexagonGenPredicate::getPredRegFor(const RegSubRegPair &Reg) {
// Create a predicate register for a given Reg. The newly created register
// will have its value copied from Reg, so that it can be later used as
// an operand in other instructions.
- assert(Reg.R.isVirtual());
+ assert(Reg.Reg.isVirtual());
RegToRegMap::iterator F = G2P.find(Reg);
if (F != G2P.end())
return F->second;
LLVM_DEBUG(dbgs() << __func__ << ": " << PrintRegister(Reg, *TRI));
- MachineInstr *DefI = MRI->getVRegDef(Reg.R);
+ MachineInstr *DefI = MRI->getVRegDef(Reg.Reg);
assert(DefI);
unsigned Opc = DefI->getOpcode();
if (Opc == Hexagon::C2_tfrpr || Opc == TargetOpcode::COPY) {
assert(DefI->getOperand(0).isDef() && DefI->getOperand(1).isUse());
- RegisterSubReg PR = DefI->getOperand(1);
+ RegSubRegPair PR = getRegSubRegPair(DefI->getOperand(1));
G2P.insert(std::make_pair(Reg, PR));
LLVM_DEBUG(dbgs() << " -> " << PrintRegister(PR, *TRI) << '\n');
return PR;
@@ -264,11 +251,11 @@ RegisterSubReg HexagonGenPredicate::getPredRegFor(const RegisterSubReg &Reg) {
if (isConvertibleToPredForm(DefI)) {
MachineBasicBlock::iterator DefIt = DefI;
BuildMI(B, std::next(DefIt), DL, TII->get(TargetOpcode::COPY), NewPR)
- .addReg(Reg.R, 0, Reg.S);
- G2P.insert(std::make_pair(Reg, RegisterSubReg(NewPR)));
- LLVM_DEBUG(dbgs() << " -> !" << PrintRegister(RegisterSubReg(NewPR), *TRI)
+ .addReg(Reg.Reg, 0, Reg.SubReg);
+ G2P.insert(std::make_pair(Reg, RegSubRegPair(NewPR)));
+ LLVM_DEBUG(dbgs() << " -> !" << PrintRegister(RegSubRegPair(NewPR), *TRI)
<< '\n');
- return RegisterSubReg(NewPR);
+ return RegSubRegPair(NewPR);
}
llvm_unreachable("Invalid argument");
@@ -310,21 +297,21 @@ bool HexagonGenPredicate::isScalarCmp(unsigned Opc) {
return false;
}
-bool HexagonGenPredicate::isScalarPred(RegisterSubReg PredReg) {
- std::queue<RegisterSubReg> WorkQ;
+bool HexagonGenPredicate::isScalarPred(RegSubRegPair PredReg) {
+ std::queue<RegSubRegPair> WorkQ;
WorkQ.push(PredReg);
while (!WorkQ.empty()) {
- RegisterSubReg PR = WorkQ.front();
+ RegSubRegPair PR = WorkQ.front();
WorkQ.pop();
- const MachineInstr *DefI = MRI->getVRegDef(PR.R);
+ const MachineInstr *DefI = MRI->getVRegDef(PR.Reg);
if (!DefI)
return false;
unsigned DefOpc = DefI->getOpcode();
switch (DefOpc) {
case TargetOpcode::COPY: {
const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass;
- if (MRI->getRegClass(PR.R) != PredRC)
+ if (MRI->getRegClass(PR.Reg) != PredRC)
return false;
// If it is a copy between two predicate registers, fall through.
[[fallthrough]];
@@ -344,7 +331,7 @@ bool HexagonGenPredicate::isScalarPred(RegisterSubReg PredReg) {
// Add operands to the queue.
for (const MachineOperand &MO : DefI->operands())
if (MO.isReg() && MO.isUse())
- WorkQ.push(RegisterSubReg(MO.getReg()));
+ WorkQ.push(RegSubRegPair(MO.getReg()));
break;
// All non-vector compares are ok, everything else is bad.
@@ -366,8 +353,8 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
- RegisterSubReg Reg(MO);
- if (Reg.S && Reg.S != Hexagon::isub_lo)
+ RegSubRegPair Reg(getRegSubRegPair(MO));
+ if (Reg.SubReg && Reg.SubReg != Hexagon::isub_lo)
return false;
if (!PredGPRs.count(Reg))
return false;
@@ -393,7 +380,7 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) {
// If it's a scalar predicate register, then all bits in it are
// the same. Otherwise, to determine whether all bits are 0 or not
// we would need to use any8.
- RegisterSubReg PR = getPredRegFor(MI->getOperand(1));
+ RegSubRegPair PR = getPredRegFor(getRegSubRegPair(MI->getOperand(1)));
if (!isScalarPred(PR))
return false;
// This will skip the immediate argument when creating the predicate
@@ -404,37 +391,37 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) {
// Check that def is in operand #0.
MachineOperand &Op0 = MI->getOperand(0);
assert(Op0.isDef());
- RegisterSubReg OutR(Op0);
+ RegSubRegPair OutR(getRegSubRegPair(Op0));
// Don't use getPredRegFor, since it will create an association between
// the argument and a created predicate register (i.e. it will insert a
// copy if a new predicate register is created).
const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass;
- RegisterSubReg NewPR = MRI->createVirtualRegister(PredRC);
- MachineInstrBuilder MIB = BuildMI(B, MI, DL, TII->get(NewOpc), NewPR.R);
+ RegSubRegPair NewPR = MRI->createVirtualRegister(PredRC);
+ MachineInstrBuilder MIB = BuildMI(B, MI, DL, TII->get(NewOpc), NewPR.Reg);
// Add predicate counterparts of the GPRs.
for (unsigned i = 1; i < NumOps; ++i) {
- RegisterSubReg GPR = MI->getOperand(i);
- RegisterSubReg Pred = getPredRegFor(GPR);
- MIB.addReg(Pred.R, 0, Pred.S);
+ RegSubRegPair GPR = getRegSubRegPair(MI->getOperand(i));
+ RegSubRegPair Pred = getPredRegFor(GPR);
+ MIB.addReg(Pred.Reg, 0, Pred.SubReg);
}
LLVM_DEBUG(dbgs() << "generated: " << *MIB);
// Generate a copy-out: NewGPR = NewPR, and replace all uses of OutR
// with NewGPR.
- const TargetRegisterClass *RC = MRI->getRegClass(OutR.R);
+ const TargetRegisterClass *RC = MRI->getRegClass(OutR.Reg);
Register NewOutR = MRI->createVirtualRegister(RC);
BuildMI(B, MI, DL, TII->get(TargetOpcode::COPY), NewOutR)
- .addReg(NewPR.R, 0, NewPR.S);
- MRI->replaceRegWith(OutR.R, NewOutR);
+ .addReg(NewPR.Reg, 0, NewPR.SubReg);
+ MRI->replaceRegWith(OutR.Reg, NewOutR);
MI->eraseFromParent();
// If the processed instruction was C2_tfrrp (i.e. Rn = Pm; Pk = Rn),
// then the output will be a predicate register. Do not visit the
// users of it.
if (!isPredReg(NewOutR)) {
- RegisterSubReg R(NewOutR);
+ RegSubRegPair R(NewOutR);
PredGPRs.insert(R);
processPredicateGPR(R);
}
@@ -461,18 +448,18 @@ bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
for (MachineInstr &MI : MBB) {
if (MI.getOpcode() != TargetOpcode::COPY)
continue;
- RegisterSubReg DR = MI.getOperand(0);
- RegisterSubReg SR = MI.getOperand(1);
- if (!DR.R.isVirtual())
+ RegSubRegPair DR = getRegSubRegPair(MI.getOperand(0));
+ RegSubRegPair SR = getRegSubRegPair(MI.getOperand(1));
+ if (!DR.Reg.isVirtual())
continue;
- if (!SR.R.isVirtual())
+ if (!SR.Reg.isVirtual())
continue;
- if (MRI->getRegClass(DR.R) != PredRC)
+ if (MRI->getRegClass(DR.Reg) != PredRC)
continue;
- if (MRI->getRegClass(SR.R) != PredRC)
+ if (MRI->getRegClass(SR.Reg) != PredRC)
continue;
- assert(!DR.S && !SR.S && "Unexpected subregister");
- MRI->replaceRegWith(DR.R, SR.R);
+ assert(!DR.SubReg && !SR.SubReg && "Unexpected subregister");
+ MRI->replaceRegWith(DR.Reg, SR.Reg);
Erase.insert(&MI);
Changed = true;
}
@@ -497,7 +484,7 @@ bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
collectPredicateGPR(MF);
- for (const RegisterSubReg &R : PredGPRs)
+ for (const RegSubRegPair &R : PredGPRs)
processPredicateGPR(R);
bool Again;
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index b8f3455..086cb1f 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -534,6 +534,13 @@ public:
MCInst getNop() const override;
};
+/// \brief Create RegSubRegPair from a register MachineOperand
+inline TargetInstrInfo::RegSubRegPair
+getRegSubRegPair(const MachineOperand &O) {
+ assert(O.isReg());
+ return TargetInstrInfo::RegSubRegPair(O.getReg(), O.getSubReg());
+}
+
} // end namespace llvm
#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONINSTRINFO_H
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 7f83155..91051cd 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -644,9 +644,8 @@ MCSubtargetInfo *Hexagon_MC::createHexagonMCSubtargetInfo(const Triple &TT,
void Hexagon_MC::addArchSubtarget(MCSubtargetInfo const *STI, StringRef FS) {
assert(STI != nullptr);
if (STI->getCPU().contains("t")) {
- auto ArchSTI = createHexagonMCSubtargetInfo(
- STI->getTargetTriple(),
- STI->getCPU().substr(0, STI->getCPU().size() - 1), FS);
+ auto ArchSTI = createHexagonMCSubtargetInfo(STI->getTargetTriple(),
+ STI->getCPU().drop_back(), FS);
std::lock_guard<std::mutex> Lock(ArchSubtargetMutex);
ArchSubtarget[std::string(STI->getCPU())] =
std::unique_ptr<MCSubtargetInfo const>(ArchSTI);
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 66c5139..2ada2e4 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -492,7 +492,7 @@ NVPTXTTIImpl::getInstructionCost(const User *U,
// since it is classified as a call in the IR. A better cost model would
// be to return the number of asm instructions embedded in the asm
// string.
- auto &AsmStr = IA->getAsmString();
+ StringRef AsmStr = IA->getAsmString();
const unsigned InstCount =
count_if(split(AsmStr, ';'), [](StringRef AsmInst) {
// Trim off scopes denoted by '{' and '}' as these can be ignored
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 8f1b790..ee8aa37 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -727,7 +727,8 @@ static constexpr FeatureBitset XTHeadGroup = {
RISCV::FeatureVendorXTHeadMemPair, RISCV::FeatureVendorXTHeadSync,
RISCV::FeatureVendorXTHeadVdot};
-static constexpr FeatureBitset XAndesGroup = {RISCV::FeatureVendorXAndesPerf};
+static constexpr FeatureBitset XAndesGroup = {
+ RISCV::FeatureVendorXAndesPerf, RISCV::FeatureVendorXAndesVPackFPH};
static constexpr DecoderListEntry DecoderList32[]{
// Vendor Extensions
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 18d341a..43e41f0 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1517,6 +1517,14 @@ def HasVendorXAndesPerf
AssemblerPredicate<(all_of FeatureVendorXAndesPerf),
"'XAndesPerf' (Andes Performance Extension)">;
+def FeatureVendorXAndesVPackFPH
+ : RISCVExtension<5, 0, "Andes Vector Packed FP16 Extension",
+ [FeatureStdExtZvfhmin]>;
+def HasVendorXAndesVPackFPH
+ : Predicate<"Subtarget->hasVendorXAndesVPackFPH()">,
+ AssemblerPredicate<(all_of FeatureVendorXAndesVPackFPH),
+ "'XAndesVPackFPH' (Andes Vector Packed FP16 Extension)">;
+
//===----------------------------------------------------------------------===//
// LLVM specific features and extensions
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 134d82d..0f93bd9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14516,8 +14516,8 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
// (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
- // Perform this optimization only in the zba extension.
- if (!Subtarget.hasStdExtZba())
+ // Perform this optimization only in the zba/xandesperf extension.
+ if (!Subtarget.hasStdExtZba() && !Subtarget.hasVendorXAndesPerf())
return SDValue();
// Skip for vector types and larger types.
@@ -15448,8 +15448,9 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
if (VT != Subtarget.getXLenVT())
return SDValue();
- const bool HasShlAdd =
- Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
+ const bool HasShlAdd = Subtarget.hasStdExtZba() ||
+ Subtarget.hasVendorXTHeadBa() ||
+ Subtarget.hasVendorXAndesPerf();
ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!CNode)
@@ -18407,7 +18408,6 @@ static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG,
static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
-
assert(N->getOpcode() == RISCVISD::ADD_VL || N->getOpcode() == ISD::ADD);
if (N->getValueType(0).isFixedLengthVector())
@@ -18471,9 +18471,74 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(Opc, DL, VT, Ops);
}
-static bool legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
- ISD::MemIndexType &IndexType,
- RISCVTargetLowering::DAGCombinerInfo &DCI) {
+static SDValue combineVqdotAccum(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+
+ assert(N->getOpcode() == RISCVISD::ADD_VL);
+
+ if (!N->getValueType(0).isVector())
+ return SDValue();
+
+ SDValue Addend = N->getOperand(0);
+ SDValue DotOp = N->getOperand(1);
+
+ SDValue AddPassthruOp = N->getOperand(2);
+ if (!AddPassthruOp.isUndef())
+ return SDValue();
+
+ auto IsVqdotqOpc = [](unsigned Opc) {
+ switch (Opc) {
+ case RISCVISD::VQDOT_VL:
+ case RISCVISD::VQDOTU_VL:
+ case RISCVISD::VQDOTSU_VL:
+ return true;
+ default:
+ return false;
+ }
+ };
+
+ if (!IsVqdotqOpc(DotOp.getOpcode()))
+ std::swap(Addend, DotOp);
+
+ if (!IsVqdotqOpc(DotOp.getOpcode()))
+ return SDValue();
+
+ SDValue AddMask = N->getOperand(3);
+ SDValue AddVL = N->getOperand(4);
+
+ SDValue MulVL = DotOp.getOperand(4);
+ if (AddVL != MulVL)
+ return SDValue();
+
+ if (AddMask.getOpcode() != RISCVISD::VMSET_VL ||
+ AddMask.getOperand(0) != MulVL)
+ return SDValue();
+
+ SDValue AccumOp = DotOp.getOperand(2);
+ bool IsNullAdd = ISD::isConstantSplatVectorAllZeros(AccumOp.getNode());
+ // Peek through fixed to scalable
+ if (!IsNullAdd && AccumOp.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ AccumOp.getOperand(0).isUndef())
+ IsNullAdd =
+ ISD::isConstantSplatVectorAllZeros(AccumOp.getOperand(1).getNode());
+
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ // The manual constant folding is required, this case is not constant folded
+ // or combined.
+ if (!IsNullAdd)
+ Addend = DAG.getNode(RISCVISD::ADD_VL, DL, VT, AccumOp, Addend,
+ DAG.getUNDEF(VT), AddMask, AddVL);
+
+ SDValue Ops[] = {DotOp.getOperand(0), DotOp.getOperand(1), Addend,
+ DotOp.getOperand(3), DotOp->getOperand(4)};
+ return DAG.getNode(DotOp->getOpcode(), DL, VT, Ops);
+}
+
+static bool
+legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
+ ISD::MemIndexType &IndexType,
+ RISCVTargetLowering::DAGCombinerInfo &DCI) {
if (!DCI.isBeforeLegalize())
return false;
@@ -19594,6 +19659,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
case RISCVISD::ADD_VL:
if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
return V;
+ if (SDValue V = combineVqdotAccum(N, DAG, Subtarget))
+ return V;
return combineToVWMACC(N, DAG, Subtarget);
case RISCVISD::VWADD_W_VL:
case RISCVISD::VWADDU_W_VL:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index 2ec7684..aa70a9d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -135,6 +135,16 @@ class NDSRVInstRR<bits<7> funct7, string opcodestr>
let mayStore = 0;
}
+class NDSRVInstLEA<bits<7> funct7, string opcodestr>
+ : RVInstR<funct7, 0b000, OPC_CUSTOM_2,
+ (outs GPR:$rd), (ins GPR:$rs2, GPR:$rs1),
+ opcodestr, "$rd, $rs1, $rs2">,
+ Sched<[WriteIALU, ReadIALU, ReadIALU]> {
+ let hasSideEffects = 0;
+ let mayLoad = 0;
+ let mayStore = 0;
+}
+
// GP: ADDI, LB, LBU
class NDSRVInstLBGP<bits<2> funct2, string opcodestr>
: RVInst<(outs GPR:$rd), (ins simm18:$imm18),
@@ -305,6 +315,29 @@ class NDSRVInstSDGP<bits<3> funct3, string opcodestr>
let mayStore = 1;
}
+class NDSRVInstVFPMAD<bits<6> funct6, string opcodestr>
+ : RVInst<(outs VR:$vd), (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
+ opcodestr # "." # "vf", "$vd, $rs1, $vs2$vm", [], InstFormatR>,
+ SchedBinaryMC<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF"> {
+ bits<5> vs2;
+ bits<5> rs1;
+ bits<5> vd;
+ bit vm;
+
+ let Inst{31-26} = funct6;
+ let Inst{25} = vm;
+ let Inst{24-20} = vs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = 0b100;
+ let Inst{11-7} = vd;
+ let Inst{6-0} = OPC_CUSTOM_2.Value;
+ let hasSideEffects = 0;
+ let mayLoad = 0;
+ let mayStore = 0;
+
+ let RVVConstraint = VMConstraint;
+}
+
//===----------------------------------------------------------------------===//
// XAndesPerf
//===----------------------------------------------------------------------===//
@@ -321,9 +354,9 @@ def NDS_BNEC : NDSRVInstBC<0b110, "nds.bnec">;
def NDS_BFOS : NDSRVInstBFO<0b011, "nds.bfos">;
def NDS_BFOZ : NDSRVInstBFO<0b010, "nds.bfoz">;
-def NDS_LEA_H : NDSRVInstRR<0b0000101, "nds.lea.h">;
-def NDS_LEA_W : NDSRVInstRR<0b0000110, "nds.lea.w">;
-def NDS_LEA_D : NDSRVInstRR<0b0000111, "nds.lea.d">;
+def NDS_LEA_H : NDSRVInstLEA<0b0000101, "nds.lea.h">;
+def NDS_LEA_W : NDSRVInstLEA<0b0000110, "nds.lea.w">;
+def NDS_LEA_D : NDSRVInstLEA<0b0000111, "nds.lea.d">;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
def NDS_ADDIGP : NDSRVInstLBGP<0b01, "nds.addigp">;
@@ -345,14 +378,53 @@ def NDS_FLMISM : NDSRVInstRR<0b0010011, "nds.flmism">;
} // Predicates = [HasVendorXAndesPerf]
let Predicates = [HasVendorXAndesPerf, IsRV64] in {
-def NDS_LEA_B_ZE : NDSRVInstRR<0b0001000, "nds.lea.b.ze">;
-def NDS_LEA_H_ZE : NDSRVInstRR<0b0001001, "nds.lea.h.ze">;
-def NDS_LEA_W_ZE : NDSRVInstRR<0b0001010, "nds.lea.w.ze">;
-def NDS_LEA_D_ZE : NDSRVInstRR<0b0001011, "nds.lea.d.ze">;
+def NDS_LEA_B_ZE : NDSRVInstLEA<0b0001000, "nds.lea.b.ze">;
+def NDS_LEA_H_ZE : NDSRVInstLEA<0b0001001, "nds.lea.h.ze">;
+def NDS_LEA_W_ZE : NDSRVInstLEA<0b0001010, "nds.lea.w.ze">;
+def NDS_LEA_D_ZE : NDSRVInstLEA<0b0001011, "nds.lea.d.ze">;
def NDS_LWUGP : NDSRVInstLWGP<0b110, "nds.lwugp">;
def NDS_LDGP : NDSRVInstLDGP<0b011, "nds.ldgp">;
def NDS_SDGP : NDSRVInstSDGP<0b111, "nds.sdgp">;
} // Predicates = [HasVendorXAndesPerf, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// XAndesVPackFPH
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasVendorXAndesVPackFPH],
+ Uses = [FRM, VL, VTYPE], mayRaiseFPException = true in {
+def NDS_VFPMADT_VF : NDSRVInstVFPMAD<0b000010, "nds.vfpmadt">;
+def NDS_VFPMADB_VF : NDSRVInstVFPMAD<0b000011, "nds.vfpmadb">;
+}
} // DecoderNamespace = "XAndes"
+
+// Patterns
+
+let Predicates = [HasVendorXAndesPerf] in {
+
+defm : ShxAddPat<1, NDS_LEA_H>;
+defm : ShxAddPat<2, NDS_LEA_W>;
+defm : ShxAddPat<3, NDS_LEA_D>;
+
+def : CSImm12MulBy4Pat<NDS_LEA_W>;
+def : CSImm12MulBy8Pat<NDS_LEA_D>;
+} // Predicates = [HasVendorXAndesPerf]
+
+let Predicates = [HasVendorXAndesPerf, IsRV64] in {
+
+defm : ADD_UWPat<NDS_LEA_B_ZE>;
+
+defm : ShxAdd_UWPat<1, NDS_LEA_H_ZE>;
+defm : ShxAdd_UWPat<2, NDS_LEA_W_ZE>;
+defm : ShxAdd_UWPat<3, NDS_LEA_D_ZE>;
+
+defm : Sh1Add_UWPat<NDS_LEA_H_ZE>;
+defm : Sh2Add_UWPat<NDS_LEA_W_ZE>;
+defm : Sh3Add_UWPat<NDS_LEA_D_ZE>;
+
+def : Sh1AddPat<NDS_LEA_H_ZE>;
+def : Sh2AddPat<NDS_LEA_W_ZE>;
+def : Sh3AddPat<NDS_LEA_D_ZE>;
+} // Predicates = [HasVendorXAndesPerf, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 9227c1b..4353e94 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -672,10 +672,7 @@ def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (PACK GPR:$rs, (XLenVT X0))>;
let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in
def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (PACKW GPR:$rs, (XLenVT X0))>;
-let Predicates = [HasStdExtZba] in {
-
-foreach i = {1,2,3} in {
- defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
+multiclass ShxAddPat<int i, Instruction shxadd> {
def : Pat<(XLenVT (add_like_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
(shxadd GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (riscv_shl_add GPR:$rs1, (XLenVT i), GPR:$rs2)),
@@ -687,15 +684,90 @@ foreach i = {1,2,3} in {
(shxadd pat:$rs1, GPR:$rs2)>;
}
-def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
- (SH2ADD (XLenVT (ADDI (XLenVT X0), CSImm12MulBy4:$i)),
+class CSImm12MulBy4Pat<Instruction sh2add>
+ : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
+ (sh2add (XLenVT (ADDI (XLenVT X0), CSImm12MulBy4:$i)),
GPR:$r)>;
-def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
- (SH3ADD (XLenVT (ADDI (XLenVT X0), CSImm12MulBy8:$i)),
+
+class CSImm12MulBy8Pat<Instruction sh3add>
+ : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
+ (sh3add (XLenVT (ADDI (XLenVT X0), CSImm12MulBy8:$i)),
GPR:$r)>;
+let Predicates = [HasStdExtZba] in {
+foreach i = {1,2,3} in {
+ defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
+ defm : ShxAddPat<i, shxadd>;
+}
+
+def : CSImm12MulBy4Pat<SH2ADD>;
+def : CSImm12MulBy8Pat<SH3ADD>;
} // Predicates = [HasStdExtZba]
+multiclass ADD_UWPat<Instruction add_uw> {
+ def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
+ (add_uw GPR:$rs1, GPR:$rs2)>;
+ def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (add_uw GPR:$rs, (XLenVT X0))>;
+}
+
+multiclass ShxAdd_UWPat<int i, Instruction shxadd_uw> {
+ def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)),
+ (XLenVT GPR:$rs2))),
+ (shxadd_uw GPR:$rs1, GPR:$rs2)>;
+ def : Pat<(i64 (riscv_shl_add (and GPR:$rs1, 0xFFFFFFFF), (i64 i), GPR:$rs2)),
+ (shxadd_uw GPR:$rs1, GPR:$rs2)>;
+
+ defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
+ // More complex cases use a ComplexPattern.
+ def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
+ (shxadd_uw pat:$rs1, GPR:$rs2)>;
+}
+
+multiclass Sh1Add_UWPat<Instruction sh1add_uw> {
+ def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF),
+ (XLenVT GPR:$rs2))),
+ (sh1add_uw GPR:$rs1, GPR:$rs2)>;
+ // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
+ def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE),
+ (XLenVT GPR:$rs2))),
+ (sh1add_uw (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
+}
+
+multiclass Sh2Add_UWPat<Instruction sh2add_uw> {
+ def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF),
+ (XLenVT GPR:$rs2))),
+ (sh2add_uw GPR:$rs1, GPR:$rs2)>;
+ // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
+ def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC),
+ (XLenVT GPR:$rs2))),
+ (sh2add_uw (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
+}
+
+multiclass Sh3Add_UWPat<Instruction sh3add_uw> {
+ def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8),
+ (XLenVT GPR:$rs2))),
+ (sh3add_uw (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
+ // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
+ def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8),
+ (XLenVT GPR:$rs2))),
+ (sh3add_uw (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
+}
+
+class Sh1AddPat<Instruction sh1add>
+ : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE),
+ (XLenVT GPR:$rs2))),
+ (sh1add (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
+
+class Sh2AddPat<Instruction sh2add>
+ : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC),
+ (XLenVT GPR:$rs2))),
+ (sh2add (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
+
+class Sh3AddPat<Instruction sh3add>
+ : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8),
+ (XLenVT GPR:$rs2))),
+ (sh3add (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
+
let Predicates = [HasStdExtZba, IsRV64] in {
def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
(SLLI_UW GPR:$rs1, uimm5:$shamt)>;
@@ -704,47 +776,21 @@ def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
(SLLI_UW (XLenVT (SRLI GPR:$rs1, Shifted32OnesMask:$mask)),
Shifted32OnesMask:$mask)>;
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
- (ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;
-foreach i = {1,2,3} in {
- defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
- def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
- (shxadd_uw GPR:$rs1, GPR:$rs2)>;
- def : Pat<(i64 (riscv_shl_add (and GPR:$rs1, 0xFFFFFFFF), (i64 i), GPR:$rs2)),
- (shxadd_uw GPR:$rs1, GPR:$rs2)>;
-}
-
-def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
- (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
- (SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
- (SH3ADD_UW GPR:$rs1, GPR:$rs2)>;
+defm : ADD_UWPat<ADD_UW>;
-// More complex cases use a ComplexPattern.
foreach i = {1,2,3} in {
- defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
- def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
- (!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
+ defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
+ defm : ShxAdd_UWPat<i, shxadd_uw>;
}
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
- (SH1ADD (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
- (SH2ADD (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
- (SH3ADD (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
-
-// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
- (SH1ADD_UW (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
- (SH2ADD_UW (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
-def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
- (SH3ADD_UW (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
+defm : Sh1Add_UWPat<SH1ADD_UW>;
+defm : Sh2Add_UWPat<SH2ADD_UW>;
+defm : Sh3Add_UWPat<SH3ADD_UW>;
+def : Sh1AddPat<SH1ADD>;
+def : Sh2AddPat<SH2ADD>;
+def : Sh3AddPat<SH3ADD>;
} // Predicates = [HasStdExtZba, IsRV64]
let Predicates = [HasStdExtZbcOrZbkc] in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
index 715ba3a..348de5d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
@@ -10,6 +10,10 @@
//
//===----------------------------------------------------------------------===//
+// This predicate is true when the rs2 operand of vlse or vsse is x0, false
+// otherwise.
+def VLDSX0Pred : MCSchedPredicate<CheckRegOperand<3, X0>>;
+
// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
def isSEXT_W
: TIIPredicate<"isSEXT_W",
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 0204ab4..6c7658c 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -9,10 +9,6 @@
//===----------------------------------------------------------------------===//
/// Define scheduler resources associated with def operands.
-// This predicate is true when the rs2 operand of vlse or vsse is x0, false
-// otherwise.
-def VLDSX0Pred : MCSchedPredicate<CheckRegOperand<3, X0>>;
-
defvar SchedMxList = ["MF8", "MF4", "MF2", "M1", "M2", "M4", "M8"];
// Used for widening and narrowing instructions as it doesn't contain M8.
defvar SchedMxListW = !listremove(SchedMxList, ["M8"]);
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 41f8e1a..721b0bf 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -583,6 +583,8 @@ bool RISCVVectorPeephole::foldUndefPassthruVMV_V_V(MachineInstr &MI) {
SrcPolicy.setImm(SrcPolicy.getImm() | RISCVVType::TAIL_AGNOSTIC);
}
+ MRI->constrainRegClass(MI.getOperand(2).getReg(),
+ MRI->getRegClass(MI.getOperand(0).getReg()));
MRI->replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(2).getReg());
MRI->clearKillFlags(MI.getOperand(2).getReg());
MI.eraseFromParent();
@@ -653,6 +655,8 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
Policy |= RISCVVType::TAIL_AGNOSTIC;
Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+ MRI->constrainRegClass(Src->getOperand(0).getReg(),
+ MRI->getRegClass(MI.getOperand(0).getReg()));
MRI->replaceRegWith(MI.getOperand(0).getReg(), Src->getOperand(0).getReg());
MI.eraseFromParent();
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index ad42c73..251828b6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -1632,10 +1632,8 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVTypeByName(
auto SpirvTy = getOrCreateSPIRVType(Ty, MIRBuilder, AQ, false, true);
// Handle "type*" or "type* vector[N]".
- if (TypeStr.starts_with("*")) {
+ if (TypeStr.consume_front("*"))
SpirvTy = getOrCreateSPIRVPointerType(Ty, MIRBuilder, SC);
- TypeStr = TypeStr.substr(strlen("*"));
- }
// Handle "typeN*" or "type vector[N]*".
bool IsPtrToVec = TypeStr.consume_back("*");
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
index ed52e0c..4cb1df2 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
@@ -192,8 +192,8 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
// Make sure CC is a fp conditional flag.
CC = (CC < SPCC::FCC_BEGIN) ? (CC + SPCC::FCC_BEGIN) : CC;
break;
- case SP::CBCOND:
- case SP::CBCONDA:
+ case SP::CPBCOND:
+ case SP::CPBCONDA:
// Make sure CC is a cp conditional flag.
CC = (CC < SPCC::CPCC_BEGIN) ? (CC + SPCC::CPCC_BEGIN) : CC;
break;
diff --git a/llvm/lib/Target/Sparc/Sparc.td b/llvm/lib/Target/Sparc/Sparc.td
index 2083c0e..93c3098 100644
--- a/llvm/lib/Target/Sparc/Sparc.td
+++ b/llvm/lib/Target/Sparc/Sparc.td
@@ -49,6 +49,12 @@ def FeatureVIS2
def FeatureVIS3
: SubtargetFeature<"vis3", "IsVIS3", "true",
"Enable Visual Instruction Set extensions III">;
+def FeatureUA2005
+ : SubtargetFeature<"ua2005", "IsUA2005", "true",
+ "Enable UltraSPARC Architecture 2005 extensions">;
+def FeatureUA2007
+ : SubtargetFeature<"ua2007", "IsUA2007", "true",
+ "Enable UltraSPARC Architecture 2007 extensions">;
def FeatureLeon
: SubtargetFeature<"leon", "IsLeon", "true",
"Enable LEON extensions">;
@@ -152,13 +158,15 @@ def : Proc<"ultrasparc3", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
FeatureVIS2],
[TuneSlowRDPC]>;
def : Proc<"niagara", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
- FeatureVIS2]>;
+ FeatureVIS2, FeatureUA2005]>;
def : Proc<"niagara2", [FeatureV9, FeatureV8Deprecated, UsePopc,
- FeatureVIS, FeatureVIS2]>;
+ FeatureVIS, FeatureVIS2, FeatureUA2005]>;
def : Proc<"niagara3", [FeatureV9, FeatureV8Deprecated, UsePopc,
- FeatureVIS, FeatureVIS2, FeatureVIS3]>;
+ FeatureVIS, FeatureVIS2, FeatureVIS3,
+ FeatureUA2005, FeatureUA2007]>;
def : Proc<"niagara4", [FeatureV9, FeatureV8Deprecated, UsePopc,
- FeatureVIS, FeatureVIS2, FeatureVIS3]>;
+ FeatureVIS, FeatureVIS2, FeatureVIS3,
+ FeatureUA2005, FeatureUA2007]>;
// LEON 2 FT generic
def : Processor<"leon2", LEON2Itineraries,
diff --git a/llvm/lib/Target/Sparc/SparcInstrAliases.td b/llvm/lib/Target/Sparc/SparcInstrAliases.td
index bc57ddb..590395c 100644
--- a/llvm/lib/Target/Sparc/SparcInstrAliases.td
+++ b/llvm/lib/Target/Sparc/SparcInstrAliases.td
@@ -286,11 +286,11 @@ multiclass cp_cond_alias<string cond, int condVal> {
// cb<cond> $imm
def : InstAlias<!strconcat(!strconcat("cb", cond), " $imm"),
- (CBCOND brtarget:$imm, condVal), 0>;
+ (CPBCOND brtarget:$imm, condVal), 0>;
// cb<cond>,a $imm
def : InstAlias<!strconcat(!strconcat("cb", cond), ",a $imm"),
- (CBCONDA brtarget:$imm, condVal), 0>;
+ (CPBCONDA brtarget:$imm, condVal), 0>;
}
// Instruction aliases for register conditional branches and moves.
diff --git a/llvm/lib/Target/Sparc/SparcInstrFormats.td b/llvm/lib/Target/Sparc/SparcInstrFormats.td
index 4ff902b..2998f53 100644
--- a/llvm/lib/Target/Sparc/SparcInstrFormats.td
+++ b/llvm/lib/Target/Sparc/SparcInstrFormats.td
@@ -260,6 +260,23 @@ multiclass F3_S<string OpcStr, bits<6> Op3Val, bit XVal, SDNode OpNode,
itin>;
}
+// 4-operand instructions.
+class F3_4<bits<6> op3val, bits<4> op5val, dag outs, dag ins,
+ string asmstr, list<dag> pattern = [], InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<4> op5;
+ bits<5> rs3;
+ bits<5> rs2;
+
+ let op = 2;
+ let op3 = op3val;
+ let op5 = op5val;
+
+ let Inst{13-9} = rs3;
+ let Inst{8-5} = op5;
+ let Inst{4-0} = rs2;
+}
+
class F4<bits<6> op3, dag outs, dag ins, string asmstr, list<dag> pattern,
InstrItinClass itin = NoItinerary>
: InstSP<outs, ins, asmstr, pattern, itin> {
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index fd9e367..dd77432 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -47,6 +47,14 @@ def HasVIS2 : Predicate<"Subtarget->isVIS2()">,
def HasVIS3 : Predicate<"Subtarget->isVIS3()">,
AssemblerPredicate<(all_of FeatureVIS3)>;
+// HasUA2005 - This is true when the target processor has UA 2005 extensions.
+def HasUA2005 : Predicate<"Subtarget->isUA2005()">,
+ AssemblerPredicate<(all_of FeatureUA2005)>;
+
+// HasUA2007 - This is true when the target processor has UA 2007 extensions.
+def HasUA2007 : Predicate<"Subtarget->isUA2007()">,
+ AssemblerPredicate<(all_of FeatureUA2007)>;
+
// HasHardQuad - This is true when the target processor supports quad floating
// point instructions.
def HasHardQuad : Predicate<"Subtarget->hasHardQuad()">;
@@ -1023,10 +1031,10 @@ class CPBranchSPA<dag ins, string asmstr, list<dag> pattern>
} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
-def CBCOND : CPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
+def CPBCOND : CPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
"cb$cond $imm22",
[(SPbrfcc bb:$imm22, imm:$cond)]>;
-def CBCONDA : CPBranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+def CPBCONDA : CPBranchSPA<(ins brtarget:$imm22, CCOp:$cond),
"cb$cond,a $imm22", []>;
// Section B.24 - Call and Link Instruction, p. 125
@@ -1971,4 +1979,5 @@ def : Pat<(build_vector (i32 IntRegs:$a1), (i32 IntRegs:$a2)),
include "SparcInstr64Bit.td"
include "SparcInstrVIS.td"
+include "SparcInstrUAOSA.td"
include "SparcInstrAliases.td"
diff --git a/llvm/lib/Target/Sparc/SparcInstrUAOSA.td b/llvm/lib/Target/Sparc/SparcInstrUAOSA.td
new file mode 100644
index 0000000..1e1a054
--- /dev/null
+++ b/llvm/lib/Target/Sparc/SparcInstrUAOSA.td
@@ -0,0 +1,47 @@
+//=== SparcInstrUAOSA.td - UltraSPARC/Oracle SPARC Architecture extensions ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction formats, definitions and patterns needed for
+// UA 2005 and UA 2007 instructions on SPARC.
+//===----------------------------------------------------------------------===//
+
+class UA2005RegWin<string asmstr, bits<5> fcn>
+ : F3_1<2, 0b110001, (outs), (ins), asmstr, []> {
+ let rd = fcn;
+ let rs1 = 0;
+ let rs2 = 0;
+}
+
+// Convenience template for 4-operand instructions
+class FourOp<string OpcStr, bits<6> op3val, bits<4> op5val,
+ RegisterClass RC>
+ : F3_4<op3val, op5val, (outs RC:$rd), (ins RC:$rs1, RC:$rs2, RC:$rs3),
+ !strconcat(OpcStr, " $rs1, $rs2, $rs3, $rd")>;
+
+// UltraSPARC Architecture 2005 Instructions
+let Predicates = [HasUA2005] in {
+let hasSideEffects = 1 in {
+def ALLCLEAN : UA2005RegWin<"allclean", 0b00010>;
+def INVALW : UA2005RegWin<"invalw", 0b00101>;
+def NORMALW : UA2005RegWin<"normalw", 0b00100>;
+def OTHERW : UA2005RegWin<"otherw", 0b00011>;
+}
+} // Predicates = [HasUA2005]
+
+// UltraSPARC Architecture 2007 Instructions
+let Predicates = [HasUA2007] in {
+def FMADDS : FourOp<"fmadds", 0b110111, 0b0001, FPRegs>;
+def FMADDD : FourOp<"fmaddd", 0b110111, 0b0010, DFPRegs>;
+def FMSUBS : FourOp<"fmsubs", 0b110111, 0b0101, FPRegs>;
+def FMSUBD : FourOp<"fmsubd", 0b110111, 0b0110, DFPRegs>;
+
+def FNMADDS : FourOp<"fnmadds", 0b110111, 0b1101, FPRegs>;
+def FNMADDD : FourOp<"fnmaddd", 0b110111, 0b1110, DFPRegs>;
+def FNMSUBS : FourOp<"fnmsubs", 0b110111, 0b1001, FPRegs>;
+def FNMSUBD : FourOp<"fnmsubd", 0b110111, 0b1010, DFPRegs>;
+} // Predicates = [HasUA2007]
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 86b4d82..642a9cf 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2378,7 +2378,7 @@ bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
// Drop the optional '.'.
StringRef DotDispStr = Tok.getString();
DotDispStr.consume_front(".");
- StringRef TrailingDot;
+ bool TrailingDot = false;
// .Imm gets lexed as a real.
if (Tok.is(AsmToken::Real)) {
@@ -2388,10 +2388,7 @@ bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
Info.Offset = DotDisp.getZExtValue();
} else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
Tok.is(AsmToken::Identifier)) {
- if (DotDispStr.ends_with(".")) {
- TrailingDot = DotDispStr.substr(DotDispStr.size() - 1);
- DotDispStr = DotDispStr.drop_back(1);
- }
+ TrailingDot = DotDispStr.consume_back(".");
const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
const StringRef Base = BaseMember.first, Member = BaseMember.second;
if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
@@ -2409,8 +2406,8 @@ bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
while (Tok.getLoc().getPointer() < DotExprEndLoc)
Lex();
- if (!TrailingDot.empty())
- getLexer().UnLex(AsmToken(AsmToken::Dot, TrailingDot));
+ if (TrailingDot)
+ getLexer().UnLex(AsmToken(AsmToken::Dot, "."));
SM.addImm(Info.Offset);
SM.setTypeInfo(Info.Type);
return false;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9f75fe8..e3bb5db 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -24573,9 +24573,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (CC == ISD::SETOEQ || CC == ISD::SETUNE) {
auto NewCC = (CC == ISD::SETOEQ) ? X86::COND_E : (X86::COND_NE);
assert(Op0.getSimpleValueType() != MVT::bf16 && "Unsupported Type");
- if (Op0.getSimpleValueType() != MVT::f80)
- return getSETCC(
+ if (Op0.getSimpleValueType() != MVT::f80) {
+ SDValue Res = getSETCC(
NewCC, DAG.getNode(X86ISD::UCOMX, dl, MVT::i32, Op0, Op1), dl, DAG);
+ return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
+ }
}
}
// Handle floating point.
@@ -60829,7 +60831,7 @@ static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
- const std::string &AsmStr = IA->getAsmString();
+ StringRef AsmStr = IA->getAsmString();
IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
@@ -62046,7 +62048,7 @@ bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
// No inline stack probe for Windows, they have their own mechanism.
- if (Subtarget.isOSWindows() ||
+ if (Subtarget.isOSWindows() || Subtarget.isUEFI() ||
MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
return false;
@@ -62072,7 +62074,8 @@ X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
// Generally, if we aren't on Windows, the platform ABI does not include
// support for stack probes, so don't emit them.
- if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
+ if ((!Subtarget.isOSWindows() && !Subtarget.isUEFI()) ||
+ Subtarget.isTargetMachO() ||
MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
return "";
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 5220ae2..963a2bb84e 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -8122,6 +8122,14 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
+ // Do not fold a NDD instruction and a memory instruction with relocation to
+ // avoid emit APX relocation when the flag is disabled for backward
+ // compatibility.
+ uint64_t TSFlags = MI.getDesc().TSFlags;
+ if (!X86EnableAPXForRelocation && isMemInstrWithGOTPCREL(LoadMI) &&
+ X86II::hasNewDataDest(TSFlags))
+ return nullptr;
+
// Determine the alignment of the load.
Align Alignment;
unsigned LoadOpc = LoadMI.getOpcode();
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 2a9f567..e53f256 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -187,6 +187,40 @@ inline static bool isAddMemInstrWithRelocation(const MachineInstr &MI) {
return false;
}
+inline static bool isMemInstrWithGOTPCREL(const MachineInstr &MI) {
+ unsigned Op = MI.getOpcode();
+ switch (Op) {
+ case X86::TEST32mr:
+ case X86::TEST64mr:
+ case X86::CMP32rm:
+ case X86::CMP64rm:
+ case X86::MOV32rm:
+ case X86::MOV64rm:
+ case X86::ADC32rm:
+ case X86::ADD32rm:
+ case X86::AND32rm:
+ case X86::OR32rm:
+ case X86::SBB32rm:
+ case X86::SUB32rm:
+ case X86::XOR32rm:
+ case X86::ADC64rm:
+ case X86::ADD64rm:
+ case X86::AND64rm:
+ case X86::OR64rm:
+ case X86::SBB64rm:
+ case X86::SUB64rm:
+ case X86::XOR64rm: {
+ int MemOpNo = X86II::getMemoryOperandNo(MI.getDesc().TSFlags) +
+ X86II::getOperandBias(MI.getDesc());
+ const MachineOperand &MO = MI.getOperand(X86::AddrDisp + MemOpNo);
+ if (MO.getTargetFlags() == X86II::MO_GOTPCREL)
+ return true;
+ break;
+ }
+ }
+ return false;
+}
+
class X86InstrInfo final : public X86GenInstrInfo {
X86Subtarget &Subtarget;
const X86RegisterInfo RI;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index ef58c76..c192e88 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -50,6 +50,8 @@ static cl::opt<bool>
cl::desc("Disable two address hints for register "
"allocation"));
+extern cl::opt<bool> X86EnableAPXForRelocation;
+
X86RegisterInfo::X86RegisterInfo(const Triple &TT)
: X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
X86_MC::getDwarfRegFlavour(TT, false),
@@ -121,6 +123,11 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
if (RC == &X86::GR8_NOREXRegClass)
return RC;
+ // Keep using non-rex2 register class when APX feature (EGPR/NDD/NF) is not
+ // enabled for relocation.
+ if (!X86EnableAPXForRelocation && isNonRex2RegClass(RC))
+ return RC;
+
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
const TargetRegisterClass *Super = RC;
@@ -1258,3 +1265,18 @@ const TargetRegisterClass *X86RegisterInfo::constrainRegClassToNonRex2(
return &X86::GR64_NOREX2_NOSPRegClass;
}
}
+
+bool X86RegisterInfo::isNonRex2RegClass(const TargetRegisterClass *RC) const {
+ switch (RC->getID()) {
+ default:
+ return false;
+ case X86::GR8_NOREX2RegClassID:
+ case X86::GR16_NOREX2RegClassID:
+ case X86::GR32_NOREX2RegClassID:
+ case X86::GR64_NOREX2RegClassID:
+ case X86::GR32_NOREX2_NOSPRegClassID:
+ case X86::GR64_NOREX2_NOSPRegClassID:
+ case X86::GR64_with_sub_16bit_in_GR16_NOREX2RegClassID:
+ return true;
+ }
+} \ No newline at end of file
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 13a5fbf..19b409a 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -178,6 +178,8 @@ public:
const TargetRegisterClass *
constrainRegClassToNonRex2(const TargetRegisterClass *RC) const;
+
+ bool isNonRex2RegClass(const TargetRegisterClass *RC) const;
};
} // End llvm namespace
diff --git a/llvm/lib/Target/X86/X86SuppressAPXForReloc.cpp b/llvm/lib/Target/X86/X86SuppressAPXForReloc.cpp
index d40995c..68b6dde 100644
--- a/llvm/lib/Target/X86/X86SuppressAPXForReloc.cpp
+++ b/llvm/lib/Target/X86/X86SuppressAPXForReloc.cpp
@@ -65,9 +65,8 @@ FunctionPass *llvm::createX86SuppressAPXForRelocationPass() {
return new X86SuppressAPXForRelocationPass();
}
-static void suppressEGPRRegClass(MachineFunction &MF, MachineInstr &MI,
+static void suppressEGPRRegClass(MachineRegisterInfo *MRI, MachineInstr &MI,
const X86Subtarget &ST, unsigned int OpNum) {
- MachineRegisterInfo *MRI = &MF.getRegInfo();
Register Reg = MI.getOperand(OpNum).getReg();
if (!Reg.isVirtual()) {
assert(!X86II::isApxExtendedReg(Reg) && "APX EGPR is used unexpectedly.");
@@ -79,11 +78,30 @@ static void suppressEGPRRegClass(MachineFunction &MF, MachineInstr &MI,
MRI->setRegClass(Reg, NewRC);
}
+// Suppress EGPR in operand 0 of uses to avoid APX relocation types emitted. The
+// register in operand 0 of instruction with relocation may be replaced with
+// operand 0 of uses which may be EGPR. That may lead to emit APX relocation
+// types which breaks the backward compatibility with builtin linkers on
+// existing OS. For example, the register in operand 0 of instruction with
+// relocation is used in PHI instruction, and it may be replaced with operand 0
+// of PHI instruction after PHI elimination and Machine Copy Propagation pass.
+static void suppressEGPRRegClassInRegAndUses(MachineRegisterInfo *MRI,
+ MachineInstr &MI,
+ const X86Subtarget &ST,
+ unsigned int OpNum) {
+ suppressEGPRRegClass(MRI, MI, ST, OpNum);
+ Register Reg = MI.getOperand(OpNum).getReg();
+ for (MachineInstr &Use : MRI->use_instructions(Reg))
+ if (Use.getOpcode() == X86::PHI)
+ suppressEGPRRegClass(MRI, Use, ST, 0);
+}
+
static bool handleInstructionWithEGPR(MachineFunction &MF,
const X86Subtarget &ST) {
if (!ST.hasEGPR())
return false;
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
auto suppressEGPRInInstrWithReloc = [&](MachineInstr &MI,
ArrayRef<unsigned> OpNoArray) {
int MemOpNo = X86II::getMemoryOperandNo(MI.getDesc().TSFlags) +
@@ -94,7 +112,7 @@ static bool handleInstructionWithEGPR(MachineFunction &MF,
LLVM_DEBUG(dbgs() << "Transform instruction with relocation type:\n "
<< MI);
for (unsigned OpNo : OpNoArray)
- suppressEGPRRegClass(MF, MI, ST, OpNo);
+ suppressEGPRRegClassInRegAndUses(MRI, MI, ST, OpNo);
LLVM_DEBUG(dbgs() << "to:\n " << MI << "\n");
}
};
@@ -167,7 +185,8 @@ static bool handleNDDOrNFInstructions(MachineFunction &MF,
int MemOpNo = X86II::getMemoryOperandNo(MI.getDesc().TSFlags) +
X86II::getOperandBias(MI.getDesc());
const MachineOperand &MO = MI.getOperand(X86::AddrDisp + MemOpNo);
- if (MO.getTargetFlags() == X86II::MO_GOTTPOFF) {
+ if (MO.getTargetFlags() == X86II::MO_GOTTPOFF ||
+ MO.getTargetFlags() == X86II::MO_GOTPCREL) {
LLVM_DEBUG(dbgs() << "Transform instruction with relocation type:\n "
<< MI);
Register Reg = MRI->createVirtualRegister(&X86::GR64_NOREX2RegClass);
@@ -178,7 +197,7 @@ static bool handleNDDOrNFInstructions(MachineFunction &MF,
MI.getOperand(1).setReg(Reg);
const MCInstrDesc &NewDesc = TII->get(X86::ADD64rm);
MI.setDesc(NewDesc);
- suppressEGPRRegClass(MF, MI, ST, 0);
+ suppressEGPRRegClassInRegAndUses(MRI, MI, ST, 0);
MI.tieOperands(0, 1);
LLVM_DEBUG(dbgs() << "to:\n " << *CopyMIB << "\n");
LLVM_DEBUG(dbgs() << " " << MI << "\n");
@@ -191,7 +210,7 @@ static bool handleNDDOrNFInstructions(MachineFunction &MF,
if (MO.getTargetFlags() == X86II::MO_GOTTPOFF) {
LLVM_DEBUG(dbgs() << "Transform instruction with relocation type:\n "
<< MI);
- suppressEGPRRegClass(MF, MI, ST, 0);
+ suppressEGPRRegClassInRegAndUses(MRI, MI, ST, 0);
Register Reg = MRI->createVirtualRegister(&X86::GR64_NOREX2RegClass);
[[maybe_unused]] MachineInstrBuilder CopyMIB =
BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(TargetOpcode::COPY),
diff --git a/llvm/lib/TargetParser/ARMTargetParserCommon.cpp b/llvm/lib/TargetParser/ARMTargetParserCommon.cpp
index e2ed8df..89d5e0d 100644
--- a/llvm/lib/TargetParser/ARMTargetParserCommon.cpp
+++ b/llvm/lib/TargetParser/ARMTargetParserCommon.cpp
@@ -82,9 +82,9 @@ StringRef ARM::getCanonicalArchName(StringRef Arch) {
// Ex. "armebv7", move past the "eb".
if (offset != StringRef::npos && A.substr(offset, 2) == "eb")
offset += 2;
- // Or, if it ends with eb ("armv7eb"), chop it off.
- else if (A.ends_with("eb"))
- A = A.substr(0, A.size() - 2);
+ else
+ // Or, if it ends with eb ("armv7eb"), chop it off.
+ A.consume_back("eb");
// Trim the head
if (offset != StringRef::npos)
A = A.substr(offset);
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index a72c1d3..8b84363 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -10909,9 +10909,7 @@ struct AAPotentialValuesImpl : AAPotentialValues {
return II.I == I && II.S == S;
};
bool operator<(const ItemInfo &II) const {
- if (I == II.I)
- return S < II.S;
- return I < II.I;
+ return std::tie(I, S) < std::tie(II.I, II.S);
};
};
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index d855647..ebabece 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -285,8 +285,6 @@ class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
// module and its jumptable entry needs to be exported to thinlto backends.
bool IsExported;
- size_t numTrailingObjects(OverloadToken<MDNode *>) const { return NTypes; }
-
public:
static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
bool IsJumpTableCanonical, bool IsExported,
@@ -297,7 +295,7 @@ public:
GTM->NTypes = Types.size();
GTM->IsJumpTableCanonical = IsJumpTableCanonical;
GTM->IsExported = IsExported;
- llvm::copy(Types, GTM->getTrailingObjects<MDNode *>());
+ llvm::copy(Types, GTM->getTrailingObjects());
return GTM;
}
@@ -313,9 +311,7 @@ public:
return IsExported;
}
- ArrayRef<MDNode *> types() const {
- return ArrayRef(getTrailingObjects<MDNode *>(), NTypes);
- }
+ ArrayRef<MDNode *> types() const { return getTrailingObjects(NTypes); }
};
struct ICallBranchFunnel final
@@ -329,13 +325,13 @@ struct ICallBranchFunnel final
Call->CI = CI;
Call->UniqueId = UniqueId;
Call->NTargets = Targets.size();
- llvm::copy(Targets, Call->getTrailingObjects<GlobalTypeMember *>());
+ llvm::copy(Targets, Call->getTrailingObjects());
return Call;
}
CallInst *CI;
ArrayRef<GlobalTypeMember *> targets() const {
- return ArrayRef(getTrailingObjects<GlobalTypeMember *>(), NTargets);
+ return getTrailingObjects(NTargets);
}
unsigned UniqueId;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 206d41e..ed94866 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2271,6 +2271,27 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
}
}
+ // Similar to the combine above, but handles the case for scalable vectors
+ // where both shuffle(V1, 0) and C are splats.
+ //
+ // Op(shuffle(V1, 0), (splat C)) -> shuffle(Op(V1, (splat C)), 0)
+ if (isa<ScalableVectorType>(Inst.getType()) &&
+ match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
+ m_ZeroMask())),
+ m_ImmConstant(C)))) {
+ if (Constant *Splat = C->getSplatValue()) {
+ bool ConstOp1 = isa<Constant>(RHS);
+ VectorType *V1Ty = cast<VectorType>(V1->getType());
+ Constant *NewC = ConstantVector::getSplat(V1Ty->getElementCount(), Splat);
+
+ Value *NewLHS = ConstOp1 ? V1 : NewC;
+ Value *NewRHS = ConstOp1 ? NewC : V1;
+ VectorType *VTy = cast<VectorType>(Inst.getType());
+ SmallVector<int> Mask(VTy->getElementCount().getKnownMinValue(), 0);
+ return createBinOpShuffle(NewLHS, NewRHS, Mask);
+ }
+ }
+
// Try to reassociate to sink a splat shuffle after a binary operation.
if (Inst.isAssociative() && Inst.isCommutative()) {
// Canonicalize shuffle operand as LHS.
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 42bccc8..62ef40c 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2538,6 +2538,7 @@ static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
IsInBounds);
GEP->replaceAllUsesWith(NewGEP);
eraseInstruction(*GEP, SafetyInfo, MSSAU);
+ salvageDebugInfo(*Src);
eraseInstruction(*Src, SafetyInfo, MSSAU);
return true;
}
@@ -2592,7 +2593,10 @@ static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
ICmp.setPredicate(Pred);
ICmp.setOperand(0, VariantOp);
ICmp.setOperand(1, NewCmpOp);
- eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
+
+ Instruction &DeadI = cast<Instruction>(*VariantLHS);
+ salvageDebugInfo(DeadI);
+ eraseInstruction(DeadI, SafetyInfo, MSSAU);
return true;
}
@@ -2670,7 +2674,10 @@ static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
ICmp.setPredicate(Pred);
ICmp.setOperand(0, VariantOp);
ICmp.setOperand(1, NewCmpOp);
- eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
+
+ Instruction &DeadI = cast<Instruction>(*VariantLHS);
+ salvageDebugInfo(DeadI);
+ eraseInstruction(DeadI, SafetyInfo, MSSAU);
return true;
}
@@ -2877,8 +2884,10 @@ static bool hoistBOAssociation(Instruction &I, Loop &L,
// (LV op C1) might not be erased if it has more uses than the one we just
// replaced.
- if (BO0->use_empty())
+ if (BO0->use_empty()) {
+ salvageDebugInfo(*BO0);
eraseInstruction(*BO0, SafetyInfo, MSSAU);
+ }
return true;
}
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index e10d0c0..0ac1a15 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -58,6 +58,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
@@ -224,6 +225,7 @@ public:
// Delete the instructions backwards, as it has a reduced likelihood of
// having to update as many def-use and use-def chains.
for (auto *Inst : reverse(Unused)) {
+ salvageDebugInfo(*Inst);
if (!Inst->use_empty())
Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
Inst->eraseFromParent();
diff --git a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
index 6e91c4f..6b4fc88 100644
--- a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
@@ -164,7 +164,6 @@ private:
bool legalLoopInstructions();
bool legalLoopMemoryAccesses();
bool isLoopAlreadyVisited();
- void setNoAliasToLoop(Loop *VerLoop);
bool instructionSafeForVersioning(Instruction *I);
};
@@ -344,6 +343,13 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
}
LoadAndStoreCounter++;
Value *Ptr = St->getPointerOperand();
+ // Don't allow stores that we don't have runtime checks for, as we won't be
+ // able to mark them noalias meaning they would prevent any code motion.
+ auto &Pointers = LAI->getRuntimePointerChecking()->Pointers;
+ if (!any_of(Pointers, [&](auto &P) { return P.PointerValue == Ptr; })) {
+ LLVM_DEBUG(dbgs() << " Found a store without a runtime check.\n");
+ return false;
+ }
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
InvariantCounter++;
@@ -361,6 +367,13 @@ bool LoopVersioningLICM::legalLoopInstructions() {
InvariantCounter = 0;
IsReadOnlyLoop = true;
using namespace ore;
+ // Get LoopAccessInfo from current loop via the proxy.
+ LAI = &LAIs.getInfo(*CurLoop);
+ // Check LoopAccessInfo for need of runtime check.
+ if (LAI->getRuntimePointerChecking()->getChecks().empty()) {
+ LLVM_DEBUG(dbgs() << " LAA: Runtime check not found !!\n");
+ return false;
+ }
// Iterate over loop blocks and instructions of each block and check
// instruction safety.
for (auto *Block : CurLoop->getBlocks())
@@ -374,13 +387,6 @@ bool LoopVersioningLICM::legalLoopInstructions() {
return false;
}
}
- // Get LoopAccessInfo from current loop via the proxy.
- LAI = &LAIs.getInfo(*CurLoop);
- // Check LoopAccessInfo for need of runtime check.
- if (LAI->getRuntimePointerChecking()->getChecks().empty()) {
- LLVM_DEBUG(dbgs() << " LAA: Runtime check not found !!\n");
- return false;
- }
// Number of runtime-checks should be less then RuntimeMemoryCheckThreshold
if (LAI->getNumRuntimePointerChecks() >
VectorizerParams::RuntimeMemoryCheckThreshold) {
@@ -501,41 +507,6 @@ bool LoopVersioningLICM::isLegalForVersioning() {
return true;
}
-/// Update loop with aggressive aliasing assumptions.
-/// It marks no-alias to any pairs of memory operations by assuming
-/// loop should not have any must-alias memory accesses pairs.
-/// During LoopVersioningLICM legality we ignore loops having must
-/// aliasing memory accesses.
-void LoopVersioningLICM::setNoAliasToLoop(Loop *VerLoop) {
- // Get latch terminator instruction.
- Instruction *I = VerLoop->getLoopLatch()->getTerminator();
- // Create alias scope domain.
- MDBuilder MDB(I->getContext());
- MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("LVDomain");
- StringRef Name = "LVAliasScope";
- MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
- SmallVector<Metadata *, 4> Scopes{NewScope}, NoAliases{NewScope};
- // Iterate over each instruction of loop.
- // set no-alias for all load & store instructions.
- for (auto *Block : CurLoop->getBlocks()) {
- for (auto &Inst : *Block) {
- // Only interested in instruction that may modify or read memory.
- if (!Inst.mayReadFromMemory() && !Inst.mayWriteToMemory())
- continue;
- // Set no-alias for current instruction.
- Inst.setMetadata(
- LLVMContext::MD_noalias,
- MDNode::concatenate(Inst.getMetadata(LLVMContext::MD_noalias),
- MDNode::get(Inst.getContext(), NoAliases)));
- // set alias-scope for current instruction.
- Inst.setMetadata(
- LLVMContext::MD_alias_scope,
- MDNode::concatenate(Inst.getMetadata(LLVMContext::MD_alias_scope),
- MDNode::get(Inst.getContext(), Scopes)));
- }
- }
-}
-
bool LoopVersioningLICM::run(DominatorTree *DT) {
// Do not do the transformation if disabled by metadata.
if (hasLICMVersioningTransformation(CurLoop) & TM_Disable)
@@ -563,7 +534,7 @@ bool LoopVersioningLICM::run(DominatorTree *DT) {
addStringMetadataToLoop(LVer.getVersionedLoop(),
"llvm.mem.parallel_loop_access");
// Update version loop with aggressive aliasing assumption.
- setNoAliasToLoop(LVer.getVersionedLoop());
+ LVer.annotateLoopWithNoAlias();
Changed = true;
}
return Changed;
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index dce10c0..6608515 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -168,8 +168,8 @@ bool llvm::DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI,
SmallVector<WeakTrackingVH, 8> PHIs(llvm::make_pointer_range(BB->phis()));
bool Changed = false;
- for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
- if (PHINode *PN = dyn_cast_or_null<PHINode>(PHIs[i].operator Value*()))
+ for (const auto &PHI : PHIs)
+ if (PHINode *PN = dyn_cast_or_null<PHINode>(PHI.operator Value *()))
Changed |= RecursivelyDeleteDeadPHINode(PN, TLI, MSSAU);
return Changed;
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 1db01b2..3dbd605 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -4220,12 +4220,19 @@ void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
}
bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
+ const auto *Op = I->getOperand(OpIdx);
// We can't have a PHI with a metadata type.
- if (I->getOperand(OpIdx)->getType()->isMetadataTy())
+ if (Op->getType()->isMetadataTy())
+ return false;
+
+ // swifterror pointers can only be used by a load, store, or as a swifterror
+ // argument; swifterror pointers are not allowed to be used in select or phi
+ // instructions.
+ if (Op->isSwiftError())
return false;
// Early exit.
- if (!isa<Constant, InlineAsm>(I->getOperand(OpIdx)))
+ if (!isa<Constant, InlineAsm>(Op))
return true;
switch (I->getOpcode()) {
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index c0c5823..6ba6496 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -931,13 +931,10 @@ void PromoteMem2Reg::run() {
// hasn't traversed. If this is the case, the PHI nodes may not
// have incoming values for all predecessors. Loop over all PHI nodes we have
// created, inserting poison values if they are missing any incoming values.
- for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
- I = NewPhiNodes.begin(),
- E = NewPhiNodes.end();
- I != E; ++I) {
+ for (const auto &PhiNode : NewPhiNodes) {
// We want to do this once per basic block. As such, only process a block
// when we find the PHI that is the first entry in the block.
- PHINode *SomePHI = I->second;
+ PHINode *SomePHI = PhiNode.second;
BasicBlock *BB = SomePHI->getParent();
if (&BB->front() != SomePHI)
continue;
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 0f7e15b..d94abea 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -2208,14 +2208,6 @@ static bool canSinkInstructions(
if (!I->isSameOperationAs(I0, Instruction::CompareUsingIntersectedAttrs))
return false;
- // swifterror pointers can only be used by a load or store; sinking a load
- // or store would require introducing a select for the pointer operand,
- // which isn't allowed for swifterror pointers.
- if (isa<StoreInst>(I) && I->getOperand(1)->isSwiftError())
- return false;
- if (isa<LoadInst>(I) && I->getOperand(0)->isSwiftError())
- return false;
-
// Treat MMRAs conservatively. This pass can be quite aggressive and
// could drop a lot of MMRAs otherwise.
if (MMRAMetadata(*I) != I0MMRA)
@@ -7080,8 +7072,8 @@ static bool switchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
APInt One(TableSizePowOf2, 1);
// Build bitmask; fill in a 1 bit for every case.
const ResultListTy &ResultList = ResultLists[PHIs[0]];
- for (size_t I = 0, E = ResultList.size(); I != E; ++I) {
- uint64_t Idx = (ResultList[I].first->getValue() - TableIndexOffset->getValue())
+ for (const auto &Result : ResultList) {
+ uint64_t Idx = (Result.first->getValue() - TableIndexOffset->getValue())
.getLimitedValue();
MaskInt |= One << Idx;
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 0b8b0c7..a1cedbbf 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1446,13 +1446,13 @@ public:
/// Returns true if the predicated reduction select should be used to set the
/// incoming value for the reduction phi.
- bool usePredicatedReductionSelect(unsigned Opcode, Type *PhiTy) const {
+ bool usePredicatedReductionSelect() const {
// Force to use predicated reduction select since the EVL of the
// second-to-last iteration might not be VF*UF.
if (foldTailWithEVL())
return true;
return PreferPredicatedReductionSelect ||
- TTI.preferPredicatedReductionSelect(Opcode, PhiTy);
+ TTI.preferPredicatedReductionSelect();
}
/// Estimate cost of an intrinsic call instruction CI if it were vectorized
@@ -4985,7 +4985,6 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
if (Legal->hasUncountableEarlyExit())
return 1;
- auto BestKnownTC = getSmallBestKnownTC(PSE, TheLoop);
const bool HasReductions = !Legal->getReductionVars().empty();
// If we did not calculate the cost for VF (because the user selected the VF)
@@ -5062,51 +5061,53 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
}
unsigned EstimatedVF = getEstimatedRuntimeVF(VF, VScaleForTuning);
- unsigned KnownTC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
- if (KnownTC > 0) {
- // At least one iteration must be scalar when this constraint holds. So the
- // maximum available iterations for interleaving is one less.
- unsigned AvailableTC =
- requiresScalarEpilogue(VF.isVector()) ? KnownTC - 1 : KnownTC;
-
- // If trip count is known we select between two prospective ICs, where
- // 1) the aggressive IC is capped by the trip count divided by VF
- // 2) the conservative IC is capped by the trip count divided by (VF * 2)
- // The final IC is selected in a way that the epilogue loop trip count is
- // minimized while maximizing the IC itself, so that we either run the
- // vector loop at least once if it generates a small epilogue loop, or else
- // we run the vector loop at least twice.
-
- unsigned InterleaveCountUB = bit_floor(
- std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
- unsigned InterleaveCountLB = bit_floor(std::max(
- 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
- MaxInterleaveCount = InterleaveCountLB;
-
- if (InterleaveCountUB != InterleaveCountLB) {
- unsigned TailTripCountUB =
- (AvailableTC % (EstimatedVF * InterleaveCountUB));
- unsigned TailTripCountLB =
- (AvailableTC % (EstimatedVF * InterleaveCountLB));
- // If both produce same scalar tail, maximize the IC to do the same work
- // in fewer vector loop iterations
- if (TailTripCountUB == TailTripCountLB)
- MaxInterleaveCount = InterleaveCountUB;
- }
- } else if (BestKnownTC) {
+
+ // Try to get the exact trip count, or an estimate based on profiling data or
+ // ConstantMax from PSE, failing that.
+ if (auto BestKnownTC = getSmallBestKnownTC(PSE, TheLoop)) {
// At least one iteration must be scalar when this constraint holds. So the
// maximum available iterations for interleaving is one less.
unsigned AvailableTC = requiresScalarEpilogue(VF.isVector())
? (*BestKnownTC) - 1
: *BestKnownTC;
- // If trip count is an estimated compile time constant, limit the
- // IC to be capped by the trip count divided by VF * 2, such that the vector
- // loop runs at least twice to make interleaving seem profitable when there
- // is an epilogue loop present. Since exact Trip count is not known we
- // choose to be conservative in our IC estimate.
- MaxInterleaveCount = bit_floor(std::max(
+ unsigned InterleaveCountLB = bit_floor(std::max(
1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
+
+ if (PSE.getSE()->getSmallConstantTripCount(TheLoop) > 0) {
+ // If the best known trip count is exact, we select between two
+ // prospective ICs, where
+ //
+ // 1) the aggressive IC is capped by the trip count divided by VF
+ // 2) the conservative IC is capped by the trip count divided by (VF * 2)
+ //
+ // The final IC is selected in a way that the epilogue loop trip count is
+ // minimized while maximizing the IC itself, so that we either run the
+ // vector loop at least once if it generates a small epilogue loop, or
+ // else we run the vector loop at least twice.
+
+ unsigned InterleaveCountUB = bit_floor(std::max(
+ 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
+ MaxInterleaveCount = InterleaveCountLB;
+
+ if (InterleaveCountUB != InterleaveCountLB) {
+ unsigned TailTripCountUB =
+ (AvailableTC % (EstimatedVF * InterleaveCountUB));
+ unsigned TailTripCountLB =
+ (AvailableTC % (EstimatedVF * InterleaveCountLB));
+ // If both produce same scalar tail, maximize the IC to do the same work
+ // in fewer vector loop iterations
+ if (TailTripCountUB == TailTripCountLB)
+ MaxInterleaveCount = InterleaveCountUB;
+ }
+ } else {
+ // If trip count is an estimated compile time constant, limit the
+ // IC to be capped by the trip count divided by VF * 2, such that the
+ // vector loop runs at least twice to make interleaving seem profitable
+ // when there is an epilogue loop present. Since exact Trip count is not
+ // known we choose to be conservative in our IC estimate.
+ MaxInterleaveCount = InterleaveCountLB;
+ }
}
assert(MaxInterleaveCount > 0 &&
@@ -8418,7 +8419,7 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
VPValue *Mask = nullptr;
if (Legal->isMaskRequired(I))
- Mask = getBlockInMask(I->getParent());
+ Mask = getBlockInMask(Builder.getInsertBlock());
// Determine if the pointer operand of the access is either consecutive or
// reverse consecutive.
@@ -8645,7 +8646,7 @@ VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
// all-true mask.
VPValue *Mask = nullptr;
if (Legal->isMaskRequired(CI))
- Mask = getBlockInMask(CI->getParent());
+ Mask = getBlockInMask(Builder.getInsertBlock());
else
Mask = Plan.getOrAddLiveIn(
ConstantInt::getTrue(IntegerType::getInt1Ty(CI->getContext())));
@@ -8687,7 +8688,7 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
// div/rem operation itself. Otherwise fall through to general handling below.
if (CM.isPredicatedInst(I)) {
SmallVector<VPValue *> Ops(Operands);
- VPValue *Mask = getBlockInMask(I->getParent());
+ VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
VPValue *One =
Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
@@ -8769,7 +8770,7 @@ VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
// In case of predicated execution (due to tail-folding, or conditional
// execution, or both), pass the relevant mask.
if (Legal->isMaskRequired(HI->Store))
- HGramOps.push_back(getBlockInMask(HI->Store->getParent()));
+ HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
return new VPHistogramRecipe(Opcode, HGramOps, HI->Store->getDebugLoc());
}
@@ -8823,7 +8824,7 @@ VPRecipeBuilder::handleReplication(Instruction *I, ArrayRef<VPValue *> Operands,
// added initially. Masked replicate recipes will later be placed under an
// if-then construct to prevent side-effects. Generate recipes to compute
// the block mask for this region.
- BlockInMask = getBlockInMask(I->getParent());
+ BlockInMask = getBlockInMask(Builder.getInsertBlock());
}
// Note that there is some custom logic to mark some intrinsics as uniform
@@ -9067,7 +9068,7 @@ VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction,
ReductionOpcode == Instruction::Sub) &&
"Expected an ADD or SUB operation for predicated partial "
"reductions (because the neutral element in the mask is zero)!");
- Cond = getBlockInMask(Reduction->getParent());
+ Cond = getBlockInMask(Builder.getInsertBlock());
VPValue *Zero =
Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0));
BinOp = Builder.createSelect(Cond, BinOp, Zero, Reduction->getDebugLoc());
@@ -9383,7 +9384,8 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range,
VPlanTransforms::prepareForVectorization(
*Plan, Legal->getWidestInductionType(), PSE, RequiresScalarEpilogueCheck,
CM.foldTailByMasking(), OrigLoop,
- getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()));
+ getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()),
+ Legal->hasUncountableEarlyExit(), Range);
VPlanTransforms::createLoopRegions(*Plan);
// Don't use getDecisionAndClampRange here, because we don't know the UF
@@ -9464,7 +9466,6 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range,
HeaderVPBB);
VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
- VPBlockBase *PrevVPBB = nullptr;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
// Create mask based on the IR BB corresponding to VPBB.
// TODO: Predicate directly based on VPlan.
@@ -9548,7 +9549,10 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range,
"Unexpected multidef recipe");
R.eraseFromParent();
}
+ }
+ VPBlockBase *PrevVPBB = nullptr;
+ for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
// Flatten the CFG in the loop. Masks for blocks have already been generated
// and added to recipes as needed. To do so, first disconnect VPBB from its
// successors. Then connect VPBB to the previously visited VPBB.
@@ -9582,12 +9586,6 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range,
R->setOperand(1, WideIV->getStepValue());
}
- if (auto *UncountableExitingBlock =
- Legal->getUncountableEarlyExitingBlock()) {
- VPlanTransforms::runPass(VPlanTransforms::handleUncountableEarlyExit, *Plan,
- OrigLoop, UncountableExitingBlock, RecipeBuilder,
- Range);
- }
DenseMap<VPValue *, VPValue *> IVEndValues;
addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
SetVector<VPIRInstruction *> ExitUsersToFix =
@@ -9685,7 +9683,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
auto Plan = VPlanTransforms::buildPlainCFG(OrigLoop, *LI, VPB2IRBB);
VPlanTransforms::prepareForVectorization(
*Plan, Legal->getWidestInductionType(), PSE, true, false, OrigLoop,
- getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()));
+ getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), false,
+ Range);
VPlanTransforms::createLoopRegions(*Plan);
for (ElementCount VF : Range)
@@ -9847,10 +9846,9 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
"PreviousLink must be the operand other than VecOp");
}
- BasicBlock *BB = CurrentLinkI->getParent();
VPValue *CondOp = nullptr;
- if (CM.blockNeedsPredicationForAnyReason(BB))
- CondOp = RecipeBuilder.getBlockInMask(BB);
+ if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent()))
+ CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent());
// Non-FP RdxDescs will have all fast math flags set, so clear them.
FastMathFlags FMFs = isa<FPMathOperator>(CurrentLinkI)
@@ -9893,7 +9891,7 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
// different numbers of lanes. Partial reductions mask the input instead.
if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
!isa<VPPartialReductionRecipe>(OrigExitingVPV->getDefiningRecipe())) {
- VPValue *Cond = RecipeBuilder.getBlockInMask(OrigLoop->getHeader());
+ VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
Type *PhiTy = PhiR->getOperand(0)->getLiveInIRValue()->getType();
std::optional<FastMathFlags> FMFs =
PhiTy->isFloatingPointTy()
@@ -9908,8 +9906,7 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
cast<VPInstruction>(&U)->getOpcode() ==
VPInstruction::ComputeFindLastIVResult);
});
- if (CM.usePredicatedReductionSelect(
- PhiR->getRecurrenceDescriptor().getOpcode(), PhiTy))
+ if (CM.usePredicatedReductionSelect())
PhiR->setOperand(1, NewExitingVPV);
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 7fbbb26..45cf4e1 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -995,7 +995,10 @@ class BinOpSameOpcodeHelper {
Value *LHS = I->getOperand(1 - Pos);
Constant *RHS =
ConstantInt::get(I->getOperand(Pos)->getType(), ToCIValue);
- if (Pos == 1)
+ // constant + x cannot be -constant - x
+ // instead, it should be x - -constant
+ if (Pos == 1 ||
+ (FromOpcode == Instruction::Add && ToOpcode == Instruction::Sub))
return SmallVector<Value *>({LHS, RHS});
return SmallVector<Value *>({RHS, LHS});
}
@@ -2855,9 +2858,13 @@ public:
}
/// Go through the instructions in VL and append their operands.
- void appendOperandsOfVL(ArrayRef<Value *> VL, const InstructionsState &S) {
- assert(!VL.empty() && "Bad VL");
- assert((empty() || VL.size() == getNumLanes()) &&
+ void appendOperands(ArrayRef<Value *> VL, ArrayRef<ValueList> Operands,
+ const InstructionsState &S) {
+ assert(!Operands.empty() && !VL.empty() && "Bad list of operands");
+ assert((empty() || all_of(Operands,
+ [this](const ValueList &VL) {
+ return VL.size() == getNumLanes();
+ })) &&
"Expected same number of lanes");
assert(S.valid() && "InstructionsState is invalid.");
// IntrinsicInst::isCommutative returns true if swapping the first "two"
@@ -2866,7 +2873,7 @@ public:
Instruction *MainOp = S.getMainOp();
unsigned NumOperands = MainOp->getNumOperands();
ArgSize = isa<IntrinsicInst>(MainOp) ? IntrinsicNumOperands : NumOperands;
- OpsVec.resize(NumOperands);
+ OpsVec.resize(ArgSize);
unsigned NumLanes = VL.size();
for (OperandDataVec &Ops : OpsVec)
Ops.resize(NumLanes);
@@ -2874,18 +2881,6 @@ public:
Value *V = VL[Lane];
assert((isa<Instruction>(V) || isa<PoisonValue>(V)) &&
"Expected instruction or poison value");
- if (isa<PoisonValue>(V)) {
- for (unsigned OpIdx : seq<unsigned>(NumOperands))
- OpsVec[OpIdx][Lane] = {
- PoisonValue::get(MainOp->getOperand(OpIdx)->getType()), true,
- false};
- if (auto *EI = dyn_cast<ExtractElementInst>(MainOp)) {
- OpsVec[0][Lane] = {EI->getVectorOperand(), true, false};
- } else if (auto *EV = dyn_cast<ExtractValueInst>(MainOp)) {
- OpsVec[0][Lane] = {EV->getAggregateOperand(), true, false};
- }
- continue;
- }
// Our tree has just 3 nodes: the root and two operands.
// It is therefore trivial to get the APO. We only need to check the
// opcode of V and whether the operand at OpIdx is the LHS or RHS
@@ -2896,11 +2891,16 @@ public:
// Since operand reordering is performed on groups of commutative
// operations or alternating sequences (e.g., +, -), we can safely tell
// the inverse operations by checking commutativity.
- auto [SelectedOp, Ops] = convertTo(cast<Instruction>(VL[Lane]), S);
+ if (isa<PoisonValue>(V)) {
+ for (unsigned OpIdx : seq<unsigned>(NumOperands))
+ OpsVec[OpIdx][Lane] = {Operands[OpIdx][Lane], true, false};
+ continue;
+ }
+ auto [SelectedOp, Ops] = convertTo(cast<Instruction>(V), S);
bool IsInverseOperation = !isCommutative(SelectedOp);
- for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
+ for (unsigned OpIdx : seq<unsigned>(ArgSize)) {
bool APO = (OpIdx == 0) ? false : IsInverseOperation;
- OpsVec[OpIdx][Lane] = {Ops[OpIdx], APO, false};
+ OpsVec[OpIdx][Lane] = {Operands[OpIdx][Lane], APO, false};
}
}
}
@@ -3006,12 +3006,12 @@ public:
public:
/// Initialize with all the operands of the instruction vector \p RootVL.
- VLOperands(ArrayRef<Value *> RootVL, const InstructionsState &S,
- const BoUpSLP &R)
+ VLOperands(ArrayRef<Value *> RootVL, ArrayRef<ValueList> Operands,
+ const InstructionsState &S, const BoUpSLP &R)
: TLI(*R.TLI), DL(*R.DL), SE(*R.SE), R(R),
L(R.LI->getLoopFor(S.getMainOp()->getParent())) {
// Append all the operands of RootVL.
- appendOperandsOfVL(RootVL, S);
+ appendOperands(RootVL, Operands, S);
}
/// \Returns a value vector with the operands across all lanes for the
@@ -3821,12 +3821,6 @@ private:
/// Interleaving factor for interleaved loads Vectorize nodes.
unsigned InterleaveFactor = 0;
- public:
- /// Returns interleave factor for interleave nodes.
- unsigned getInterleaveFactor() const { return InterleaveFactor; }
- /// Sets interleaving factor for the interleaving nodes.
- void setInterleave(unsigned Factor) { InterleaveFactor = Factor; }
-
/// Set this bundle's \p OpIdx'th operand to \p OpVL.
void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
if (Operands.size() < OpIdx + 1)
@@ -3838,13 +3832,16 @@ private:
copy(OpVL, Operands[OpIdx].begin());
}
- /// Set this bundle's operand from Scalars.
- void setOperand(const BoUpSLP &R, bool RequireReorder = false) {
- VLOperands Ops(Scalars, S, R);
- if (RequireReorder)
- Ops.reorder();
- for (unsigned I : seq<unsigned>(S.getMainOp()->getNumOperands()))
- setOperand(I, Ops.getVL(I));
+ public:
+ /// Returns interleave factor for interleave nodes.
+ unsigned getInterleaveFactor() const { return InterleaveFactor; }
+ /// Sets interleaving factor for the interleaving nodes.
+ void setInterleave(unsigned Factor) { InterleaveFactor = Factor; }
+
+ /// Set this bundle's operands from \p Operands.
+ void setOperands(ArrayRef<ValueList> Operands) {
+ for (unsigned I : seq<unsigned>(Operands.size()))
+ setOperand(I, Operands[I]);
}
/// Reorders operands of the node to the given mask \p Mask.
@@ -4870,12 +4867,11 @@ private:
// where their second (immediate) operand is not added. Since
// immediates do not affect scheduler behavior this is considered
// okay.
- assert(
- In &&
- (isa<ExtractValueInst, ExtractElementInst, IntrinsicInst>(In) ||
- In->getNumOperands() ==
- Bundle->getTreeEntry()->getNumOperands()) &&
- "Missed TreeEntry operands?");
+ assert(In &&
+ (isa<ExtractValueInst, ExtractElementInst, CallBase>(In) ||
+ In->getNumOperands() ==
+ Bundle->getTreeEntry()->getNumOperands()) &&
+ "Missed TreeEntry operands?");
for (unsigned OpIdx :
seq<unsigned>(Bundle->getTreeEntry()->getNumOperands()))
@@ -5980,10 +5976,15 @@ static bool isMaskedLoadCompress(
TTI.getMemoryOpCost(Instruction::Load, LoadVecTy, CommonAlignment,
LI->getPointerAddressSpace(), CostKind);
}
- if (IsStrided && !IsMasked) {
+ if (IsStrided && !IsMasked && Order.empty()) {
// Check for potential segmented(interleaved) loads.
- auto *AlignedLoadVecTy = getWidenedType(
+ VectorType *AlignedLoadVecTy = getWidenedType(
ScalarTy, getFullVectorNumberOfElements(TTI, ScalarTy, *Diff + 1));
+ if (!isSafeToLoadUnconditionally(
+ Ptr0, AlignedLoadVecTy, CommonAlignment, DL,
+ cast<LoadInst>(Order.empty() ? VL.back() : VL[Order.back()]), &AC,
+ &DT, &TLI))
+ AlignedLoadVecTy = LoadVecTy;
if (TTI.isLegalInterleavedAccessType(AlignedLoadVecTy, CompressMask[1],
CommonAlignment,
LI->getPointerAddressSpace())) {
@@ -9764,6 +9765,184 @@ bool BoUpSLP::canBuildSplitNode(ArrayRef<Value *> VL,
return true;
}
+namespace {
+/// Class accepts incoming list of values and generates the list of values
+/// for scheduling and list of operands for the new nodes.
+class InstructionsCompatibilityAnalysis {
+ DominatorTree &DT;
+ const DataLayout &DL;
+ const TargetTransformInfo &TTI;
+ const TargetLibraryInfo &TLI;
+
+ /// Builds operands for the original instructions.
+ void
+ buildOriginalOperands(const InstructionsState &S, ArrayRef<Value *> VL,
+ SmallVectorImpl<BoUpSLP::ValueList> &Operands) const {
+
+ unsigned ShuffleOrOp =
+ S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode();
+ Instruction *VL0 = S.getMainOp();
+
+ switch (ShuffleOrOp) {
+ case Instruction::PHI: {
+ auto *PH = cast<PHINode>(VL0);
+
+ // Keeps the reordered operands to avoid code duplication.
+ PHIHandler Handler(DT, PH, VL);
+ Handler.buildOperands();
+ Operands.assign(PH->getNumOperands(), {});
+ for (unsigned I : seq<unsigned>(PH->getNumOperands()))
+ Operands[I].assign(Handler.getOperands(I).begin(),
+ Handler.getOperands(I).end());
+ return;
+ }
+ case Instruction::ExtractValue:
+ case Instruction::ExtractElement:
+ // This is a special case, as it does not gather, but at the same time
+ // we are not extending buildTree_rec() towards the operands.
+ Operands.assign(1, {VL.size(), VL0->getOperand(0)});
+ return;
+ case Instruction::InsertElement:
+ Operands.assign(2, {VL.size(), nullptr});
+ for (auto [Idx, V] : enumerate(VL)) {
+ auto *IE = cast<InsertElementInst>(V);
+ for (auto [OpIdx, Ops] : enumerate(Operands))
+ Ops[Idx] = IE->getOperand(OpIdx);
+ }
+ return;
+ case Instruction::Load:
+ Operands.assign(
+ 1, {VL.size(),
+ PoisonValue::get(cast<LoadInst>(VL0)->getPointerOperandType())});
+ for (auto [V, Op] : zip(VL, Operands.back())) {
+ auto *LI = dyn_cast<LoadInst>(V);
+ if (!LI)
+ continue;
+ Op = LI->getPointerOperand();
+ }
+ return;
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::SIToFP:
+ case Instruction::UIToFP:
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::BitCast:
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ case Instruction::Select:
+ case Instruction::FNeg:
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Freeze:
+ case Instruction::Store:
+ case Instruction::ShuffleVector:
+ Operands.assign(VL0->getNumOperands(), {VL.size(), nullptr});
+ for (auto [Idx, V] : enumerate(VL)) {
+ auto *I = dyn_cast<Instruction>(V);
+ if (!I) {
+ for (auto [OpIdx, Ops] : enumerate(Operands))
+ Ops[Idx] = PoisonValue::get(VL0->getOperand(OpIdx)->getType());
+ continue;
+ }
+ auto [Op, ConvertedOps] = convertTo(I, S);
+ for (auto [OpIdx, Ops] : enumerate(Operands))
+ Ops[Idx] = ConvertedOps[OpIdx];
+ }
+ return;
+ case Instruction::GetElementPtr: {
+ Operands.assign(2, {VL.size(), nullptr});
+ // Need to cast all indices to the same type before vectorization to
+ // avoid crash.
+ // Required to be able to find correct matches between different gather
+ // nodes and reuse the vectorized values rather than trying to gather them
+ // again.
+ const unsigned IndexIdx = 1;
+ Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
+ Type *Ty =
+ all_of(VL,
+ [&](Value *V) {
+ auto *GEP = dyn_cast<GetElementPtrInst>(V);
+ return !GEP || VL0Ty == GEP->getOperand(IndexIdx)->getType();
+ })
+ ? VL0Ty
+ : DL.getIndexType(cast<GetElementPtrInst>(VL0)
+ ->getPointerOperandType()
+ ->getScalarType());
+ for (auto [Idx, V] : enumerate(VL)) {
+ auto *GEP = dyn_cast<GetElementPtrInst>(V);
+ if (!GEP) {
+ Operands[0][Idx] = V;
+ Operands[1][Idx] = ConstantInt::getNullValue(Ty);
+ continue;
+ }
+ Operands[0][Idx] = GEP->getPointerOperand();
+ auto *Op = GEP->getOperand(IndexIdx);
+ auto *CI = dyn_cast<ConstantInt>(Op);
+ Operands[1][Idx] = CI ? ConstantFoldIntegerCast(
+ CI, Ty, CI->getValue().isSignBitSet(), DL)
+ : Op;
+ }
+ return;
+ }
+ case Instruction::Call: {
+ auto *CI = cast<CallInst>(VL0);
+ Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, &TLI);
+ for (unsigned Idx : seq<unsigned>(CI->arg_size())) {
+ if (isVectorIntrinsicWithScalarOpAtArg(ID, Idx, &TTI))
+ continue;
+ auto &Ops = Operands.emplace_back();
+ for (Value *V : VL) {
+ auto *I = dyn_cast<Instruction>(V);
+ Ops.push_back(I ? I->getOperand(Idx)
+ : PoisonValue::get(VL0->getOperand(Idx)->getType()));
+ }
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ llvm_unreachable("Unexpected vectorization of the instructions.");
+ }
+
+public:
+ InstructionsCompatibilityAnalysis(DominatorTree &DT, const DataLayout &DL,
+ const TargetTransformInfo &TTI,
+ const TargetLibraryInfo &TLI)
+ : DT(DT), DL(DL), TTI(TTI), TLI(TLI) {}
+
+ SmallVector<BoUpSLP::ValueList> buildOperands(const InstructionsState &S,
+ ArrayRef<Value *> VL) {
+ assert(S && "Invalid state!");
+ SmallVector<BoUpSLP::ValueList> Operands;
+ buildOriginalOperands(S, VL, Operands);
+ return Operands;
+ }
+};
+} // namespace
+
BoUpSLP::ScalarsVectorizationLegality
BoUpSLP::getScalarsVectorizationLegality(ArrayRef<Value *> VL, unsigned Depth,
const EdgeInfo &UserTreeIdx) const {
@@ -10136,6 +10315,8 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
registerNonVectorizableLoads(ArrayRef(VL));
return;
}
+ InstructionsCompatibilityAnalysis Analysis(*DT, *DL, *TTI, *TLI);
+ SmallVector<ValueList> Operands = Analysis.buildOperands(S, VL);
ScheduleBundle Empty;
ScheduleBundle &Bundle = BundlePtr.value() ? *BundlePtr.value() : Empty;
LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
@@ -10160,21 +10341,12 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
};
switch (ShuffleOrOp) {
case Instruction::PHI: {
- auto *PH = cast<PHINode>(VL0);
-
TreeEntry *TE =
newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a new TreeEntry (PHINode).\n";
TE->dump());
- // Keeps the reordered operands to avoid code duplication.
- PHIHandler Handler(*DT, PH, VL);
- Handler.buildOperands();
- for (unsigned I : seq<unsigned>(PH->getNumOperands()))
- TE->setOperand(I, Handler.getOperands(I));
- SmallVector<ArrayRef<Value *>> Operands(PH->getNumOperands());
- for (unsigned I : seq<unsigned>(PH->getNumOperands()))
- Operands[I] = Handler.getOperands(I);
+ TE->setOperands(Operands);
CreateOperandNodes(TE, Operands);
return;
}
@@ -10201,7 +10373,7 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
TE->dump());
// This is a special case, as it does not gather, but at the same time
// we are not extending buildTreeRec() towards the operands.
- TE->setOperand(*this);
+ TE->setOperands(Operands);
return;
}
case Instruction::InsertElement: {
@@ -10232,7 +10404,7 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: added a new TreeEntry (InsertElementInst).\n";
TE->dump());
- TE->setOperand(*this);
+ TE->setOperands(Operands);
buildTreeRec(TE->getOperand(1), Depth + 1, {TE, 1});
return;
}
@@ -10287,7 +10459,13 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
case TreeEntry::NeedToGather:
llvm_unreachable("Unexpected loads state.");
}
- TE->setOperand(*this);
+ if (!CurrentOrder.empty() && State != TreeEntry::ScatterVectorize) {
+ assert(Operands.size() == 1 && "Expected a single operand only");
+ SmallVector<int> Mask;
+ inversePermutation(CurrentOrder, Mask);
+ reorderScalars(Operands.front(), Mask);
+ }
+ TE->setOperands(Operands);
if (State == TreeEntry::ScatterVectorize)
buildTreeRec(PointerOps, Depth + 1, {TE, 0});
return;
@@ -10328,7 +10506,7 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: added a new TreeEntry (CastInst).\n";
TE->dump());
- TE->setOperand(*this);
+ TE->setOperands(Operands);
for (unsigned I : seq<unsigned>(VL0->getNumOperands()))
buildTreeRec(TE->getOperand(I), Depth + 1, {TE, I});
if (ShuffleOrOp == Instruction::Trunc) {
@@ -10356,37 +10534,28 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: added a new TreeEntry (CmpInst).\n";
TE->dump());
- ValueList Left, Right;
- VLOperands Ops(VL, S, *this);
+ VLOperands Ops(VL, Operands, S, *this);
if (cast<CmpInst>(VL0)->isCommutative()) {
// Commutative predicate - collect + sort operands of the instructions
// so that each side is more likely to have the same opcode.
assert(P0 == CmpInst::getSwappedPredicate(P0) &&
"Commutative Predicate mismatch");
Ops.reorder();
- Left = Ops.getVL(0);
- Right = Ops.getVL(1);
+ Operands.front() = Ops.getVL(0);
+ Operands.back() = Ops.getVL(1);
} else {
// Collect operands - commute if it uses the swapped predicate.
- for (Value *V : VL) {
- if (isa<PoisonValue>(V)) {
- Left.push_back(PoisonValue::get(VL0->getOperand(0)->getType()));
- Right.push_back(PoisonValue::get(VL0->getOperand(1)->getType()));
+ for (auto [Idx, V] : enumerate(VL)) {
+ if (isa<PoisonValue>(V))
continue;
- }
auto *Cmp = cast<CmpInst>(V);
- Value *LHS = Cmp->getOperand(0);
- Value *RHS = Cmp->getOperand(1);
if (Cmp->getPredicate() != P0)
- std::swap(LHS, RHS);
- Left.push_back(LHS);
- Right.push_back(RHS);
+ std::swap(Operands.front()[Idx], Operands.back()[Idx]);
}
}
- TE->setOperand(0, Left);
- TE->setOperand(1, Right);
- buildTreeRec(Left, Depth + 1, {TE, 0});
- buildTreeRec(Right, Depth + 1, {TE, 1});
+ TE->setOperands(Operands);
+ buildTreeRec(Operands.front(), Depth + 1, {TE, 0});
+ buildTreeRec(Operands.back(), Depth + 1, {TE, 1});
if (ShuffleOrOp == Instruction::ICmp) {
unsigned NumSignBits0 =
ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
@@ -10429,7 +10598,13 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
"(SelectInst/UnaryOperator/BinaryOperator/FreezeInst).\n";
TE->dump());
- TE->setOperand(*this, isa<BinaryOperator>(VL0) && isCommutative(VL0));
+ if (isa<BinaryOperator>(VL0) && isCommutative(VL0)) {
+ VLOperands Ops(VL, Operands, S, *this);
+ Ops.reorder();
+ Operands[0] = Ops.getVL(0);
+ Operands[1] = Ops.getVL(1);
+ }
+ TE->setOperands(Operands);
for (unsigned I : seq<unsigned>(VL0->getNumOperands()))
buildTreeRec(TE->getOperand(I), Depth + 1, {TE, I});
return;
@@ -10439,52 +10614,7 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a new TreeEntry (GetElementPtrInst).\n";
TE->dump());
- SmallVector<ValueList, 2> Operands(2);
- // Prepare the operand vector for pointer operands.
- for (Value *V : VL) {
- auto *GEP = dyn_cast<GetElementPtrInst>(V);
- if (!GEP) {
- Operands.front().push_back(V);
- continue;
- }
- Operands.front().push_back(GEP->getPointerOperand());
- }
- TE->setOperand(0, Operands.front());
- // Need to cast all indices to the same type before vectorization to
- // avoid crash.
- // Required to be able to find correct matches between different gather
- // nodes and reuse the vectorized values rather than trying to gather them
- // again.
- int IndexIdx = 1;
- Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
- Type *Ty = all_of(VL,
- [VL0Ty, IndexIdx](Value *V) {
- auto *GEP = dyn_cast<GetElementPtrInst>(V);
- if (!GEP)
- return true;
- return VL0Ty == GEP->getOperand(IndexIdx)->getType();
- })
- ? VL0Ty
- : DL->getIndexType(cast<GetElementPtrInst>(VL0)
- ->getPointerOperandType()
- ->getScalarType());
- // Prepare the operand vector.
- for (Value *V : VL) {
- auto *I = dyn_cast<GetElementPtrInst>(V);
- if (!I) {
- Operands.back().push_back(
- ConstantInt::get(Ty, 0, /*isSigned=*/false));
- continue;
- }
- auto *Op = I->getOperand(IndexIdx);
- auto *CI = dyn_cast<ConstantInt>(Op);
- if (!CI)
- Operands.back().push_back(Op);
- else
- Operands.back().push_back(ConstantFoldIntegerCast(
- CI, Ty, CI->getValue().isSignBitSet(), *DL));
- }
- TE->setOperand(IndexIdx, Operands.back());
+ TE->setOperands(Operands);
for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
buildTreeRec(Operands[I], Depth + 1, {TE, I});
@@ -10503,7 +10633,7 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
LLVM_DEBUG(
dbgs() << "SLP: added a new TreeEntry (jumbled StoreInst).\n";
TE->dump());
- TE->setOperand(*this);
+ TE->setOperands(Operands);
buildTreeRec(TE->getOperand(0), Depth + 1, {TE, 0});
return;
}
@@ -10517,7 +10647,13 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a new TreeEntry (CallInst).\n";
TE->dump());
- TE->setOperand(*this, isCommutative(VL0));
+ if (isCommutative(VL0)) {
+ VLOperands Ops(VL, Operands, S, *this);
+ Ops.reorder();
+ Operands[0] = Ops.getVL(0);
+ Operands[1] = Ops.getVL(1);
+ }
+ TE->setOperands(Operands);
for (unsigned I : seq<unsigned>(CI->arg_size())) {
// For scalar operands no need to create an entry since no need to
// vectorize it.
@@ -10551,37 +10687,34 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
CmpInst::Predicate AltP = AltCI->getPredicate();
assert(MainP != AltP &&
"Expected different main/alternate predicates.");
- ValueList Left, Right;
// Collect operands - commute if it uses the swapped predicate or
// alternate operation.
- for (Value *V : VL) {
- if (isa<PoisonValue>(V)) {
- Left.push_back(PoisonValue::get(MainCI->getOperand(0)->getType()));
- Right.push_back(PoisonValue::get(MainCI->getOperand(1)->getType()));
+ for (auto [Idx, V] : enumerate(VL)) {
+ if (isa<PoisonValue>(V))
continue;
- }
auto *Cmp = cast<CmpInst>(V);
- Value *LHS = Cmp->getOperand(0);
- Value *RHS = Cmp->getOperand(1);
if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) {
if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
- std::swap(LHS, RHS);
+ std::swap(Operands.front()[Idx], Operands.back()[Idx]);
} else {
if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
- std::swap(LHS, RHS);
+ std::swap(Operands.front()[Idx], Operands.back()[Idx]);
}
- Left.push_back(LHS);
- Right.push_back(RHS);
}
- TE->setOperand(0, Left);
- TE->setOperand(1, Right);
- buildTreeRec(Left, Depth + 1, {TE, 0});
- buildTreeRec(Right, Depth + 1, {TE, 1});
+ TE->setOperands(Operands);
+ buildTreeRec(Operands.front(), Depth + 1, {TE, 0});
+ buildTreeRec(Operands.back(), Depth + 1, {TE, 1});
return;
}
- TE->setOperand(*this, isa<BinaryOperator>(VL0) || CI);
+ if (isa<BinaryOperator>(VL0) || CI) {
+ VLOperands Ops(VL, Operands, S, *this);
+ Ops.reorder();
+ Operands[0] = Ops.getVL(0);
+ Operands[1] = Ops.getVL(1);
+ }
+ TE->setOperands(Operands);
for (unsigned I : seq<unsigned>(VL0->getNumOperands()))
buildTreeRec(TE->getOperand(I), Depth + 1, {TE, I});
return;
@@ -18098,15 +18231,6 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
if (E->State == TreeEntry::Vectorize) {
NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign());
} else if (E->State == TreeEntry::CompressVectorize) {
- SmallVector<Value *> Scalars(E->Scalars.begin(), E->Scalars.end());
- if (!E->ReorderIndices.empty()) {
- SmallVector<int> Mask(E->ReorderIndices.begin(),
- E->ReorderIndices.end());
- reorderScalars(Scalars, Mask);
- }
- SmallVector<Value *> PointerOps(Scalars.size());
- for (auto [I, V] : enumerate(Scalars))
- PointerOps[I] = cast<LoadInst>(V)->getPointerOperand();
auto [CompressMask, LoadVecTy, InterleaveFactor, IsMasked] =
CompressEntryToData.at(E);
Align CommonAlignment = LI->getAlign();
diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
index 35e5415a..287bc93 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
@@ -460,11 +460,10 @@ static void addCanonicalIVRecipes(VPlan &Plan, VPBasicBlock *HeaderVPBB,
{CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
}
-void VPlanTransforms::prepareForVectorization(VPlan &Plan, Type *InductionTy,
- PredicatedScalarEvolution &PSE,
- bool RequiresScalarEpilogueCheck,
- bool TailFolded, Loop *TheLoop,
- DebugLoc IVDL) {
+void VPlanTransforms::prepareForVectorization(
+ VPlan &Plan, Type *InductionTy, PredicatedScalarEvolution &PSE,
+ bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop,
+ DebugLoc IVDL, bool HasUncountableEarlyExit, VFRange &Range) {
VPDominatorTree VPDT;
VPDT.recalculate(Plan);
@@ -491,19 +490,33 @@ void VPlanTransforms::prepareForVectorization(VPlan &Plan, Type *InductionTy,
addCanonicalIVRecipes(Plan, cast<VPBasicBlock>(HeaderVPB),
cast<VPBasicBlock>(LatchVPB), InductionTy, IVDL);
- // Disconnect all edges to exit blocks other than from the middle block.
- // TODO: VPlans with early exits should be explicitly converted to a form
- // exiting only via the latch here, including adjusting the exit condition,
- // instead of simply disconnecting the edges and adjusting the VPlan later.
- for (VPBlockBase *EB : Plan.getExitBlocks()) {
+ [[maybe_unused]] bool HandledUncountableEarlyExit = false;
+ // Disconnect all early exits from the loop leaving it with a single exit from
+ // the latch. Early exits that are countable are left for a scalar epilog. The
+ // condition of uncountable early exits (currently at most one is supported)
+ // is fused into the latch exit, and used to branch from middle block to the
+ // early exit destination.
+ for (VPIRBasicBlock *EB : Plan.getExitBlocks()) {
for (VPBlockBase *Pred : to_vector(EB->getPredecessors())) {
if (Pred == MiddleVPBB)
continue;
+ if (HasUncountableEarlyExit) {
+ assert(!HandledUncountableEarlyExit &&
+ "can handle exactly one uncountable early exit");
+ handleUncountableEarlyExit(cast<VPBasicBlock>(Pred), EB, Plan,
+ cast<VPBasicBlock>(HeaderVPB),
+ cast<VPBasicBlock>(LatchVPB), Range);
+ HandledUncountableEarlyExit = true;
+ }
+
cast<VPBasicBlock>(Pred)->getTerminator()->eraseFromParent();
VPBlockUtils::disconnectBlocks(Pred, EB);
}
}
+ assert((!HasUncountableEarlyExit || HandledUncountableEarlyExit) &&
+ "missed an uncountable exit that must be handled");
+
// Create SCEV and VPValue for the trip count.
// We use the symbolic max backedge-taken-count, which works also when
// vectorizing loops with uncountable early exits.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 6a4ffac..3c7ab7d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1785,10 +1785,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
VPValue *RHS = getOperand(1);
// Certain instructions can be cheaper to vectorize if they have a constant
// second vector operand. One example of this are shifts on x86.
- TargetTransformInfo::OperandValueInfo RHSInfo = {
- TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None};
- if (RHS->isLiveIn())
- RHSInfo = Ctx.TTI.getOperandInfo(RHS->getLiveInIRValue());
+ TargetTransformInfo::OperandValueInfo RHSInfo = Ctx.getOperandInfo(RHS);
if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
getOperand(1)->isDefinedOutsideLoopRegions())
@@ -2711,6 +2708,40 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
// VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
// transform, avoid computing their cost multiple times for now.
Ctx.SkipCostComputation.insert(UI);
+
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
+ Type *ResultTy = Ctx.Types.inferScalarType(this);
+ switch (UI->getOpcode()) {
+ case Instruction::GetElementPtr:
+ // We mark this instruction as zero-cost because the cost of GEPs in
+ // vectorized code depends on whether the corresponding memory instruction
+ // is scalarized or not. Therefore, we handle GEPs with the memory
+ // instruction cost.
+ return 0;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ auto Op2Info = Ctx.getOperandInfo(getOperand(1));
+ SmallVector<const Value *, 4> Operands(UI->operand_values());
+ return Ctx.TTI.getArithmeticInstrCost(
+ UI->getOpcode(), ResultTy, CostKind,
+ {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
+ Op2Info, Operands, UI, &Ctx.TLI) *
+ (isUniform() ? 1 : VF.getKnownMinValue());
+ }
+ }
+
return Ctx.getLegacyCost(UI, VF);
}
@@ -2854,8 +2885,9 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
Cost +=
Ctx.TTI.getMaskedMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind);
} else {
- TTI::OperandValueInfo OpInfo =
- Ctx.TTI.getOperandInfo(Ingredient.getOperand(0));
+ TTI::OperandValueInfo OpInfo = Ctx.getOperandInfo(
+ isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this) ? getOperand(0)
+ : getOperand(1));
Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
OpInfo, &Ingredient);
}
@@ -2976,7 +3008,7 @@ InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
unsigned AS =
getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
- Ingredient.getOpcode(), Ty, Alignment, AS, Ctx.CostKind);
+ Instruction::Load, Ty, Alignment, AS, Ctx.CostKind);
if (!Reverse)
return Cost;
@@ -3091,7 +3123,7 @@ InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
unsigned AS =
getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
- Ingredient.getOpcode(), Ty, Alignment, AS, Ctx.CostKind);
+ Instruction::Store, Ty, Alignment, AS, Ctx.CostKind);
if (!Reverse)
return Cost;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index b10b47c..806c20e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2461,63 +2461,56 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan,
}
void VPlanTransforms::handleUncountableEarlyExit(
- VPlan &Plan, Loop *OrigLoop, BasicBlock *UncountableExitingBlock,
- VPRecipeBuilder &RecipeBuilder, VFRange &Range) {
- VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
- auto *LatchVPBB = cast<VPBasicBlock>(LoopRegion->getExiting());
- VPBuilder Builder(LatchVPBB->getTerminator());
- auto *MiddleVPBB = Plan.getMiddleBlock();
- VPValue *IsEarlyExitTaken = nullptr;
-
- // Process the uncountable exiting block. Update IsEarlyExitTaken, which
- // tracks if the uncountable early exit has been taken. Also split the middle
- // block and have it conditionally branch to the early exit block if
- // EarlyExitTaken.
- auto *EarlyExitingBranch =
- cast<BranchInst>(UncountableExitingBlock->getTerminator());
- BasicBlock *TrueSucc = EarlyExitingBranch->getSuccessor(0);
- BasicBlock *FalseSucc = EarlyExitingBranch->getSuccessor(1);
- BasicBlock *EarlyExitIRBB =
- !OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc;
- VPIRBasicBlock *VPEarlyExitBlock = Plan.getExitBlock(EarlyExitIRBB);
-
- VPValue *EarlyExitNotTakenCond = RecipeBuilder.getBlockInMask(
- OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
- auto *EarlyExitTakenCond = Builder.createNot(EarlyExitNotTakenCond);
- IsEarlyExitTaken =
- Builder.createNaryOp(VPInstruction::AnyOf, {EarlyExitTakenCond});
+ VPBasicBlock *EarlyExitingVPBB, VPBasicBlock *EarlyExitVPBB, VPlan &Plan,
+ VPBasicBlock *HeaderVPBB, VPBasicBlock *LatchVPBB, VFRange &Range) {
+ using namespace llvm::VPlanPatternMatch;
+ VPBlockBase *MiddleVPBB = LatchVPBB->getSuccessors()[0];
+ if (!EarlyExitVPBB->getSinglePredecessor() &&
+ EarlyExitVPBB->getPredecessors()[1] == MiddleVPBB) {
+ assert(EarlyExitVPBB->getNumPredecessors() == 2 &&
+ EarlyExitVPBB->getPredecessors()[0] == EarlyExitingVPBB &&
+ "unsupported early exit VPBB");
+ // Early exit operand should always be last phi operand. If EarlyExitVPBB
+ // has two predecessors and EarlyExitingVPBB is the first, swap the operands
+ // of the phis.
+ for (VPRecipeBase &R : EarlyExitVPBB->phis())
+ cast<VPIRPhi>(&R)->swapOperands();
+ }
+
+ VPBuilder Builder(LatchVPBB->getTerminator());
+ VPBlockBase *TrueSucc = EarlyExitingVPBB->getSuccessors()[0];
+ assert(
+ match(EarlyExitingVPBB->getTerminator(), m_BranchOnCond(m_VPValue())) &&
+ "Terminator must be be BranchOnCond");
+ VPValue *CondOfEarlyExitingVPBB =
+ EarlyExitingVPBB->getTerminator()->getOperand(0);
+ auto *CondToEarlyExit = TrueSucc == EarlyExitVPBB
+ ? CondOfEarlyExitingVPBB
+ : Builder.createNot(CondOfEarlyExitingVPBB);
+
+ // Split the middle block and have it conditionally branch to the early exit
+ // block if CondToEarlyExit.
+ VPValue *IsEarlyExitTaken =
+ Builder.createNaryOp(VPInstruction::AnyOf, {CondToEarlyExit});
VPBasicBlock *NewMiddle = Plan.createVPBasicBlock("middle.split");
VPBasicBlock *VectorEarlyExitVPBB =
Plan.createVPBasicBlock("vector.early.exit");
- VPBlockUtils::insertOnEdge(LoopRegion, MiddleVPBB, NewMiddle);
+ VPBlockUtils::insertOnEdge(LatchVPBB, MiddleVPBB, NewMiddle);
VPBlockUtils::connectBlocks(NewMiddle, VectorEarlyExitVPBB);
NewMiddle->swapSuccessors();
- VPBlockUtils::connectBlocks(VectorEarlyExitVPBB, VPEarlyExitBlock);
+ VPBlockUtils::connectBlocks(VectorEarlyExitVPBB, EarlyExitVPBB);
// Update the exit phis in the early exit block.
VPBuilder MiddleBuilder(NewMiddle);
VPBuilder EarlyExitB(VectorEarlyExitVPBB);
- for (VPRecipeBase &R : VPEarlyExitBlock->phis()) {
+ for (VPRecipeBase &R : EarlyExitVPBB->phis()) {
auto *ExitIRI = cast<VPIRPhi>(&R);
- // Early exit operand should always be last, i.e., 0 if VPEarlyExitBlock has
+ // Early exit operand should always be last, i.e., 0 if EarlyExitVPBB has
// a single predecessor and 1 if it has two.
unsigned EarlyExitIdx = ExitIRI->getNumOperands() - 1;
- if (!VPEarlyExitBlock->getSinglePredecessor()) {
- // If VPEarlyExitBlock has two predecessors, they are already ordered such
- // that early exit is second (and latch exit is first), by construction.
- // But its underlying IRBB (EarlyExitIRBB) may have its predecessors
- // ordered the other way around, and it is the order of the latter which
- // corresponds to the order of operands of VPEarlyExitBlock's phi recipes.
- // Therefore, if early exit (UncountableExitingBlock) is the first
- // predecessor of EarlyExitIRBB, we swap the operands of phi recipes,
- // thereby bringing them to match VPEarlyExitBlock's predecessor order,
- // with early exit being last (second). Otherwise they already match.
- if (*pred_begin(VPEarlyExitBlock->getIRBasicBlock()) ==
- UncountableExitingBlock)
- ExitIRI->swapOperands();
-
+ if (ExitIRI->getNumOperands() != 1) {
// The first of two operands corresponds to the latch exit, via MiddleVPBB
// predecessor. Extract its last lane.
ExitIRI->extractLastLaneOfFirstOperand(MiddleBuilder);
@@ -2533,7 +2526,7 @@ void VPlanTransforms::handleUncountableEarlyExit(
LoopVectorizationPlanner::getDecisionAndClampRange(IsVector, Range)) {
// Update the incoming value from the early exit.
VPValue *FirstActiveLane = EarlyExitB.createNaryOp(
- VPInstruction::FirstActiveLane, {EarlyExitTakenCond}, nullptr,
+ VPInstruction::FirstActiveLane, {CondToEarlyExit}, nullptr,
"first.active.lane");
IncomingFromEarlyExit = EarlyExitB.createNaryOp(
Instruction::ExtractElement, {IncomingFromEarlyExit, FirstActiveLane},
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index cb127d3..d284d91 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -69,7 +69,8 @@ struct VPlanTransforms {
PredicatedScalarEvolution &PSE,
bool RequiresScalarEpilogueCheck,
bool TailFolded, Loop *TheLoop,
- DebugLoc IVDL);
+ DebugLoc IVDL, bool HasUncountableExit,
+ VFRange &Range);
/// Replace loops in \p Plan's flat CFG with VPRegionBlocks, turning \p Plan's
/// flat CFG into a hierarchical CFG.
@@ -173,15 +174,16 @@ struct VPlanTransforms {
/// Remove dead recipes from \p Plan.
static void removeDeadRecipes(VPlan &Plan);
- /// Update \p Plan to account for the uncountable early exit block in \p
- /// UncountableExitingBlock by
- /// * updating the condition exiting the vector loop to include the early
- /// exit conditions
+ /// Update \p Plan to account for the uncountable early exit from \p
+ /// EarlyExitingVPBB to \p EarlyExitVPBB by
+ /// * updating the condition exiting the loop via the latch to include the
+ /// early exit condition,
/// * splitting the original middle block to branch to the early exit block
- /// if taken.
- static void handleUncountableEarlyExit(VPlan &Plan, Loop *OrigLoop,
- BasicBlock *UncountableExitingBlock,
- VPRecipeBuilder &RecipeBuilder,
+ /// conditionally - according to the early exit condition.
+ static void handleUncountableEarlyExit(VPBasicBlock *EarlyExitingVPBB,
+ VPBasicBlock *EarlyExitVPBB,
+ VPlan &Plan, VPBasicBlock *HeaderVPBB,
+ VPBasicBlock *LatchVPBB,
VFRange &Range);
/// Lower abstract recipes to concrete ones, that can be codegen'd. Use \p
diff --git a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
index fccb1fb..0f08556 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
@@ -85,24 +85,16 @@ define <8 x i8> @test_varidx_extract_v16s8(<16 x i8> %x, i32 %idx) {
; CHECK-GISEL-NEXT: mov x8, sp
; CHECK-GISEL-NEXT: str q0, [sp]
; CHECK-GISEL-NEXT: and x9, x9, #0xf
-; CHECK-GISEL-NEXT: mov b2, v0.b[1]
-; CHECK-GISEL-NEXT: mov b3, v0.b[2]
; CHECK-GISEL-NEXT: lsl x10, x9, #1
; CHECK-GISEL-NEXT: sub x9, x10, x9
; CHECK-GISEL-NEXT: ldr b1, [x8, x9]
-; CHECK-GISEL-NEXT: mov v1.b[0], v1.b[0]
-; CHECK-GISEL-NEXT: mov v1.b[1], v2.b[0]
-; CHECK-GISEL-NEXT: mov b2, v0.b[3]
-; CHECK-GISEL-NEXT: mov v1.b[2], v3.b[0]
-; CHECK-GISEL-NEXT: mov b3, v0.b[4]
-; CHECK-GISEL-NEXT: mov v1.b[3], v2.b[0]
-; CHECK-GISEL-NEXT: mov b2, v0.b[5]
-; CHECK-GISEL-NEXT: mov v1.b[4], v3.b[0]
-; CHECK-GISEL-NEXT: mov b3, v0.b[6]
-; CHECK-GISEL-NEXT: mov b0, v0.b[7]
-; CHECK-GISEL-NEXT: mov v1.b[5], v2.b[0]
-; CHECK-GISEL-NEXT: mov v1.b[6], v3.b[0]
-; CHECK-GISEL-NEXT: mov v1.b[7], v0.b[0]
+; CHECK-GISEL-NEXT: mov v1.b[1], v0.b[1]
+; CHECK-GISEL-NEXT: mov v1.b[2], v0.b[2]
+; CHECK-GISEL-NEXT: mov v1.b[3], v0.b[3]
+; CHECK-GISEL-NEXT: mov v1.b[4], v0.b[4]
+; CHECK-GISEL-NEXT: mov v1.b[5], v0.b[5]
+; CHECK-GISEL-NEXT: mov v1.b[6], v0.b[6]
+; CHECK-GISEL-NEXT: mov v1.b[7], v0.b[7]
; CHECK-GISEL-NEXT: fmov d0, d1
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
index 0412aef..4d06037 100644
--- a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
@@ -13326,10 +13326,9 @@ define <16 x i8> @test_v16i8_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <16
;
; CHECK-GI-LABEL: test_v16i8_post_reg_ld1lane:
; CHECK-GI: ; %bb.0:
-; CHECK-GI-NEXT: ldr b1, [x0]
+; CHECK-GI-NEXT: ld1.b { v0 }[1], [x0]
; CHECK-GI-NEXT: add x8, x0, x2
; CHECK-GI-NEXT: str x8, [x1]
-; CHECK-GI-NEXT: mov.b v0[1], v1[0]
; CHECK-GI-NEXT: ret
%tmp1 = load i8, ptr %bar
%tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
@@ -13373,11 +13372,10 @@ define <8 x i8> @test_v8i8_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <8 x i
;
; CHECK-GI-LABEL: test_v8i8_post_reg_ld1lane:
; CHECK-GI: ; %bb.0:
-; CHECK-GI-NEXT: ldr b1, [x0]
; CHECK-GI-NEXT: ; kill: def $d0 killed $d0 def $q0
; CHECK-GI-NEXT: add x8, x0, x2
+; CHECK-GI-NEXT: ld1.b { v0 }[1], [x0]
; CHECK-GI-NEXT: str x8, [x1]
-; CHECK-GI-NEXT: mov.b v0[1], v1[0]
; CHECK-GI-NEXT: ; kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
%tmp1 = load i8, ptr %bar
@@ -13891,43 +13889,20 @@ define void @test_ld1lane_build_half(ptr %a, ptr %b, ptr %c, ptr %d, <4 x half>
}
define void @test_ld1lane_build_i8(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ptr %h, <8 x i8> %v, ptr %p) {
-; CHECK-SD-LABEL: test_ld1lane_build_i8:
-; CHECK-SD: ; %bb.0:
-; CHECK-SD-NEXT: ldr b1, [x0]
-; CHECK-SD-NEXT: ldr x8, [sp]
-; CHECK-SD-NEXT: ld1.b { v1 }[1], [x1]
-; CHECK-SD-NEXT: ld1.b { v1 }[2], [x2]
-; CHECK-SD-NEXT: ld1.b { v1 }[3], [x3]
-; CHECK-SD-NEXT: ld1.b { v1 }[4], [x4]
-; CHECK-SD-NEXT: ld1.b { v1 }[5], [x5]
-; CHECK-SD-NEXT: ld1.b { v1 }[6], [x6]
-; CHECK-SD-NEXT: ld1.b { v1 }[7], [x7]
-; CHECK-SD-NEXT: sub.8b v0, v1, v0
-; CHECK-SD-NEXT: str d0, [x8]
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_ld1lane_build_i8:
-; CHECK-GI: ; %bb.0:
-; CHECK-GI-NEXT: ldr b1, [x0]
-; CHECK-GI-NEXT: ldr b2, [x1]
-; CHECK-GI-NEXT: ldr x8, [sp]
-; CHECK-GI-NEXT: mov.b v1[0], v1[0]
-; CHECK-GI-NEXT: mov.b v1[1], v2[0]
-; CHECK-GI-NEXT: ldr b2, [x2]
-; CHECK-GI-NEXT: mov.b v1[2], v2[0]
-; CHECK-GI-NEXT: ldr b2, [x3]
-; CHECK-GI-NEXT: mov.b v1[3], v2[0]
-; CHECK-GI-NEXT: ldr b2, [x4]
-; CHECK-GI-NEXT: mov.b v1[4], v2[0]
-; CHECK-GI-NEXT: ldr b2, [x5]
-; CHECK-GI-NEXT: mov.b v1[5], v2[0]
-; CHECK-GI-NEXT: ldr b2, [x6]
-; CHECK-GI-NEXT: mov.b v1[6], v2[0]
-; CHECK-GI-NEXT: ldr b2, [x7]
-; CHECK-GI-NEXT: mov.b v1[7], v2[0]
-; CHECK-GI-NEXT: sub.8b v0, v1, v0
-; CHECK-GI-NEXT: str d0, [x8]
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_ld1lane_build_i8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: ldr b1, [x0]
+; CHECK-NEXT: ldr x8, [sp]
+; CHECK-NEXT: ld1.b { v1 }[1], [x1]
+; CHECK-NEXT: ld1.b { v1 }[2], [x2]
+; CHECK-NEXT: ld1.b { v1 }[3], [x3]
+; CHECK-NEXT: ld1.b { v1 }[4], [x4]
+; CHECK-NEXT: ld1.b { v1 }[5], [x5]
+; CHECK-NEXT: ld1.b { v1 }[6], [x6]
+; CHECK-NEXT: ld1.b { v1 }[7], [x7]
+; CHECK-NEXT: sub.8b v0, v1, v0
+; CHECK-NEXT: str d0, [x8]
+; CHECK-NEXT: ret
%ld.a = load i8, ptr %a
%ld.b = load i8, ptr %b
%ld.c = load i8, ptr %c
diff --git a/llvm/test/CodeGen/AArch64/arm64-ld1.ll b/llvm/test/CodeGen/AArch64/arm64-ld1.ll
index eaa5454..0b22fa4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ld1.ll
@@ -1004,16 +1004,10 @@ declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0(ptr) nounwin
declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0(ptr) nounwind readonly
define <16 x i8> @ld1_16b(<16 x i8> %V, ptr %bar) {
-; CHECK-SD-LABEL: ld1_16b:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ld1.b { v0 }[0], [x0]
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: ld1_16b:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr b1, [x0]
-; CHECK-GI-NEXT: mov.b v0[0], v1[0]
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: ld1_16b:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1.b { v0 }[0], [x0]
+; CHECK-NEXT: ret
; Make sure we are using the operands defined by the ABI
%tmp1 = load i8, ptr %bar
%tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0
@@ -1086,20 +1080,12 @@ define <1 x i64> @ld1_1d(ptr %p) {
}
define <8 x i8> @ld1_8b(<8 x i8> %V, ptr %bar) {
-; CHECK-SD-LABEL: ld1_8b:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: ld1.b { v0 }[0], [x0]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: ld1_8b:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr b1, [x0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov.b v0[0], v1[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: ld1_8b:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: ld1.b { v0 }[0], [x0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
; Make sure we are using the operands defined by the ABI
%tmp1 = load i8, ptr %bar
%tmp2 = insertelement <8 x i8> %V, i8 %tmp1, i32 0
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
index 2a085dc..51f1351 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -75,18 +75,11 @@ define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
}
define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
-; CHECK-SD-LABEL: ins16b16:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: mov v1.b[15], v0.b[2]
-; CHECK-SD-NEXT: mov v0.16b, v1.16b
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: ins16b16:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mov b2, v0.b[2]
-; CHECK-GI-NEXT: mov v0.16b, v1.16b
-; CHECK-GI-NEXT: mov v0.b[15], v2.b[0]
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: ins16b16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v1.b[15], v0.b[2]
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
%tmp3 = extractelement <16 x i8> %tmp1, i32 2
%tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
ret <16 x i8> %tmp4
@@ -148,20 +141,12 @@ define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
}
define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
-; CHECK-SD-LABEL: ins8b16:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: mov v1.b[15], v0.b[2]
-; CHECK-SD-NEXT: mov v0.16b, v1.16b
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: ins8b16:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov b2, v0.b[2]
-; CHECK-GI-NEXT: mov v0.16b, v1.16b
-; CHECK-GI-NEXT: mov v0.b[15], v2.b[0]
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: ins8b16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: mov v1.b[15], v0.b[2]
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
%tmp3 = extractelement <8 x i8> %tmp1, i32 2
%tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
ret <16 x i8> %tmp4
@@ -239,20 +224,12 @@ define <2 x double> @ins1f2_args_flipped(<2 x double> %tmp2, <1 x double> %tmp1)
}
define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
-; CHECK-SD-LABEL: ins16b8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v1.b[7], v0.b[2]
-; CHECK-SD-NEXT: fmov d0, d1
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: ins16b8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mov b2, v0.b[2]
-; CHECK-GI-NEXT: fmov d0, d1
-; CHECK-GI-NEXT: mov v0.b[7], v2.b[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: ins16b8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov v1.b[7], v0.b[2]
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
%tmp3 = extractelement <16 x i8> %tmp1, i32 2
%tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
ret <8 x i8> %tmp4
@@ -321,22 +298,13 @@ define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
}
define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
-; CHECK-SD-LABEL: ins8b8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: mov v1.b[4], v0.b[2]
-; CHECK-SD-NEXT: fmov d0, d1
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: ins8b8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov b2, v0.b[2]
-; CHECK-GI-NEXT: fmov d0, d1
-; CHECK-GI-NEXT: mov v0.b[4], v2.b[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: ins8b8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: mov v1.b[4], v0.b[2]
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
%tmp3 = extractelement <8 x i8> %tmp1, i32 2
%tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
ret <8 x i8> %tmp4
@@ -617,37 +585,22 @@ define i64 @smovx2s(<2 x i32> %tmp1) {
}
define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
-; CHECK-SD-LABEL: test_vcopy_lane_s8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.b[5], v1.b[3]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vcopy_lane_s8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov b1, v1.b[3]
-; CHECK-GI-NEXT: mov v0.b[5], v1.b[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vcopy_lane_s8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov v0.b[5], v1.b[3]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
%vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
ret <8 x i8> %vset_lane
}
define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
-; CHECK-SD-LABEL: test_vcopyq_laneq_s8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: mov v0.b[14], v1.b[6]
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vcopyq_laneq_s8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mov b1, v1.b[6]
-; CHECK-GI-NEXT: mov v0.b[14], v1.b[0]
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vcopyq_laneq_s8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v0.b[14], v1.b[6]
+; CHECK-NEXT: ret
%vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
ret <16 x i8> %vset_lane
}
@@ -665,18 +618,11 @@ define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
}
define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
-; CHECK-SD-LABEL: test_vcopyq_laneq_swap_s8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: mov v1.b[0], v0.b[15]
-; CHECK-SD-NEXT: mov v0.16b, v1.16b
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vcopyq_laneq_swap_s8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mov b2, v0.b[15]
-; CHECK-GI-NEXT: mov v0.16b, v1.16b
-; CHECK-GI-NEXT: mov v0.b[0], v2.b[0]
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov v1.b[0], v0.b[15]
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
%vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
ret <16 x i8> %vset_lane
}
@@ -1358,21 +1304,14 @@ define <8 x i8> @getl(<16 x i8> %x) #0 {
;
; CHECK-GI-LABEL: getl:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mov b2, v0.b[1]
; CHECK-GI-NEXT: mov v1.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov b2, v0.b[3]
-; CHECK-GI-NEXT: mov v1.b[2], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[4]
-; CHECK-GI-NEXT: mov v1.b[3], v2.b[0]
-; CHECK-GI-NEXT: mov b2, v0.b[5]
-; CHECK-GI-NEXT: mov v1.b[4], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[6]
-; CHECK-GI-NEXT: mov b0, v0.b[7]
-; CHECK-GI-NEXT: mov v1.b[5], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[6], v3.b[0]
-; CHECK-GI-NEXT: mov v1.b[7], v0.b[0]
+; CHECK-GI-NEXT: mov v1.b[1], v0.b[1]
+; CHECK-GI-NEXT: mov v1.b[2], v0.b[2]
+; CHECK-GI-NEXT: mov v1.b[3], v0.b[3]
+; CHECK-GI-NEXT: mov v1.b[4], v0.b[4]
+; CHECK-GI-NEXT: mov v1.b[5], v0.b[5]
+; CHECK-GI-NEXT: mov v1.b[6], v0.b[6]
+; CHECK-GI-NEXT: mov v1.b[7], v0.b[7]
; CHECK-GI-NEXT: fmov d0, d1
; CHECK-GI-NEXT: ret
%vecext = extractelement <16 x i8> %x, i32 0
@@ -1804,22 +1743,15 @@ define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: mov v2.16b, v1.16b
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov b3, v0.b[1]
; CHECK-GI-NEXT: adrp x8, .LCPI127_0
; CHECK-GI-NEXT: mov v1.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[2]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[3]
-; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[4]
-; CHECK-GI-NEXT: mov v1.b[3], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[5]
-; CHECK-GI-NEXT: mov v1.b[4], v4.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[6]
-; CHECK-GI-NEXT: mov b0, v0.b[7]
-; CHECK-GI-NEXT: mov v1.b[5], v3.b[0]
-; CHECK-GI-NEXT: mov v1.b[6], v4.b[0]
-; CHECK-GI-NEXT: mov v1.b[7], v0.b[0]
+; CHECK-GI-NEXT: mov v1.b[1], v0.b[1]
+; CHECK-GI-NEXT: mov v1.b[2], v0.b[2]
+; CHECK-GI-NEXT: mov v1.b[3], v0.b[3]
+; CHECK-GI-NEXT: mov v1.b[4], v0.b[4]
+; CHECK-GI-NEXT: mov v1.b[5], v0.b[5]
+; CHECK-GI-NEXT: mov v1.b[6], v0.b[6]
+; CHECK-GI-NEXT: mov v1.b[7], v0.b[7]
; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI127_0]
; CHECK-GI-NEXT: tbl v0.16b, { v1.16b, v2.16b }, v0.16b
; CHECK-GI-NEXT: ret
@@ -1853,37 +1785,23 @@ define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
;
; CHECK-GI-LABEL: test_concat_v16i8_v16i8_v8i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov b3, v0.b[1]
-; CHECK-GI-NEXT: mov v2.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[0]
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov b4, v0.b[2]
-; CHECK-GI-NEXT: mov v2.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[3]
-; CHECK-GI-NEXT: mov v2.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[4]
-; CHECK-GI-NEXT: mov v2.b[3], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[5]
-; CHECK-GI-NEXT: mov v2.b[4], v4.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[6]
-; CHECK-GI-NEXT: mov b0, v0.b[7]
-; CHECK-GI-NEXT: mov v2.b[5], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v1.b[2]
-; CHECK-GI-NEXT: mov v2.b[6], v4.b[0]
-; CHECK-GI-NEXT: mov v2.b[7], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[1]
+; CHECK-GI-NEXT: mov v2.b[1], v0.b[1]
+; CHECK-GI-NEXT: mov v2.b[2], v0.b[2]
+; CHECK-GI-NEXT: mov v2.b[3], v0.b[3]
+; CHECK-GI-NEXT: mov v2.b[4], v0.b[4]
+; CHECK-GI-NEXT: mov v2.b[5], v0.b[5]
+; CHECK-GI-NEXT: mov v2.b[6], v0.b[6]
+; CHECK-GI-NEXT: mov v2.b[7], v0.b[7]
; CHECK-GI-NEXT: mov v2.b[8], v1.b[0]
-; CHECK-GI-NEXT: mov v2.b[9], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[3]
-; CHECK-GI-NEXT: mov v2.b[10], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v1.b[4]
-; CHECK-GI-NEXT: mov v2.b[11], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[5]
-; CHECK-GI-NEXT: mov v2.b[12], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v1.b[6]
-; CHECK-GI-NEXT: mov v2.b[13], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[7]
-; CHECK-GI-NEXT: mov v2.b[14], v3.b[0]
-; CHECK-GI-NEXT: mov v2.b[15], v0.b[0]
+; CHECK-GI-NEXT: mov v2.b[9], v1.b[1]
+; CHECK-GI-NEXT: mov v2.b[10], v1.b[2]
+; CHECK-GI-NEXT: mov v2.b[11], v1.b[3]
+; CHECK-GI-NEXT: mov v2.b[12], v1.b[4]
+; CHECK-GI-NEXT: mov v2.b[13], v1.b[5]
+; CHECK-GI-NEXT: mov v2.b[14], v1.b[6]
+; CHECK-GI-NEXT: mov v2.b[15], v1.b[7]
; CHECK-GI-NEXT: mov v0.16b, v2.16b
; CHECK-GI-NEXT: ret
entry:
@@ -1933,37 +1851,23 @@ define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
; CHECK-GI-LABEL: test_concat_v16i8_v8i8_v8i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov b3, v0.b[1]
; CHECK-GI-NEXT: mov v2.b[0], v0.b[0]
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov b4, v0.b[2]
-; CHECK-GI-NEXT: mov v2.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[3]
-; CHECK-GI-NEXT: mov v2.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[4]
-; CHECK-GI-NEXT: mov v2.b[3], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v0.b[5]
-; CHECK-GI-NEXT: mov v2.b[4], v4.b[0]
-; CHECK-GI-NEXT: mov b4, v0.b[6]
-; CHECK-GI-NEXT: mov b0, v0.b[7]
-; CHECK-GI-NEXT: mov v2.b[5], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v1.b[2]
-; CHECK-GI-NEXT: mov v2.b[6], v4.b[0]
-; CHECK-GI-NEXT: mov v2.b[7], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[1]
+; CHECK-GI-NEXT: mov v2.b[1], v0.b[1]
+; CHECK-GI-NEXT: mov v2.b[2], v0.b[2]
+; CHECK-GI-NEXT: mov v2.b[3], v0.b[3]
+; CHECK-GI-NEXT: mov v2.b[4], v0.b[4]
+; CHECK-GI-NEXT: mov v2.b[5], v0.b[5]
+; CHECK-GI-NEXT: mov v2.b[6], v0.b[6]
+; CHECK-GI-NEXT: mov v2.b[7], v0.b[7]
; CHECK-GI-NEXT: mov v2.b[8], v1.b[0]
-; CHECK-GI-NEXT: mov v2.b[9], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[3]
-; CHECK-GI-NEXT: mov v2.b[10], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v1.b[4]
-; CHECK-GI-NEXT: mov v2.b[11], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[5]
-; CHECK-GI-NEXT: mov v2.b[12], v3.b[0]
-; CHECK-GI-NEXT: mov b3, v1.b[6]
-; CHECK-GI-NEXT: mov v2.b[13], v0.b[0]
-; CHECK-GI-NEXT: mov b0, v1.b[7]
-; CHECK-GI-NEXT: mov v2.b[14], v3.b[0]
-; CHECK-GI-NEXT: mov v2.b[15], v0.b[0]
+; CHECK-GI-NEXT: mov v2.b[9], v1.b[1]
+; CHECK-GI-NEXT: mov v2.b[10], v1.b[2]
+; CHECK-GI-NEXT: mov v2.b[11], v1.b[3]
+; CHECK-GI-NEXT: mov v2.b[12], v1.b[4]
+; CHECK-GI-NEXT: mov v2.b[13], v1.b[5]
+; CHECK-GI-NEXT: mov v2.b[14], v1.b[6]
+; CHECK-GI-NEXT: mov v2.b[15], v1.b[7]
; CHECK-GI-NEXT: mov v0.16b, v2.16b
; CHECK-GI-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll b/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
index 7721616..f47c06e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
@@ -329,16 +329,10 @@ define <1 x double> @testDUP.v1f64(ptr %a, ptr %b) #0 {
}
define <16 x i8> @test_vld1q_lane_s8(ptr %a, <16 x i8> %b) {
-; CHECK-GI-LABEL: test_vld1q_lane_s8:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ld1 { v0.b }[15], [x0]
-; CHECK-GI-NEXT: ret
-;
-; CHECK-SD-LABEL: test_vld1q_lane_s8:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: ldr b1, [x0]
-; CHECK-SD-NEXT: mov v0.b[15], v1.b[0]
-; CHECK-SD-NEXT: ret
+; CHECK-LABEL: test_vld1q_lane_s8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ld1 { v0.b }[15], [x0]
+; CHECK-NEXT: ret
entry:
%0 = load i8, ptr %a, align 1
%vld1_lane = insertelement <16 x i8> %b, i8 %0, i32 15
@@ -401,20 +395,12 @@ entry:
}
define <8 x i8> @test_vld1_lane_s8(ptr %a, <8 x i8> %b) {
-; CHECK-GI-LABEL: test_vld1_lane_s8:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: ld1 { v0.b }[7], [x0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
-;
-; CHECK-SD-LABEL: test_vld1_lane_s8:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: ldr b1, [x0]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: mov v0.b[7], v1.b[0]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-SD-NEXT: ret
+; CHECK-LABEL: test_vld1_lane_s8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: ld1 { v0.b }[7], [x0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
entry:
%0 = load i8, ptr %a, align 1
%vld1_lane = insertelement <8 x i8> %b, i8 %0, i32 7
diff --git a/llvm/test/CodeGen/AArch64/ctlz.ll b/llvm/test/CodeGen/AArch64/ctlz.ll
index fcd1fa2..a4863d1 100644
--- a/llvm/test/CodeGen/AArch64/ctlz.ll
+++ b/llvm/test/CodeGen/AArch64/ctlz.ll
@@ -21,10 +21,8 @@ define void @v2i8(ptr %p1) {
; CHECK-GI-LABEL: v2i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr b0, [x0]
-; CHECK-GI-NEXT: ldr b1, [x0, #1]
; CHECK-GI-NEXT: add x8, x0, #1
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
; CHECK-GI-NEXT: clz v0.8b, v0.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x0]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
@@ -59,13 +57,10 @@ define void @v3i8(ptr %p1) {
; CHECK-GI-LABEL: v3i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr b0, [x0]
-; CHECK-GI-NEXT: ldr b1, [x0, #1]
; CHECK-GI-NEXT: add x8, x0, #1
; CHECK-GI-NEXT: add x9, x0, #2
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: ld1 { v0.b }[2], [x9]
; CHECK-GI-NEXT: clz v0.8b, v0.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x0]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
diff --git a/llvm/test/CodeGen/AArch64/ctpop.ll b/llvm/test/CodeGen/AArch64/ctpop.ll
index 10ec1d0..55f75b6 100644
--- a/llvm/test/CodeGen/AArch64/ctpop.ll
+++ b/llvm/test/CodeGen/AArch64/ctpop.ll
@@ -21,10 +21,8 @@ define void @v2i8(ptr %p1) {
; CHECK-GI-LABEL: v2i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr b0, [x0]
-; CHECK-GI-NEXT: ldr b1, [x0, #1]
; CHECK-GI-NEXT: add x8, x0, #1
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
; CHECK-GI-NEXT: cnt v0.8b, v0.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x0]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
@@ -58,13 +56,10 @@ define void @v3i8(ptr %p1) {
; CHECK-GI-LABEL: v3i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr b0, [x0]
-; CHECK-GI-NEXT: ldr b1, [x0, #1]
; CHECK-GI-NEXT: add x8, x0, #1
; CHECK-GI-NEXT: add x9, x0, #2
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: ld1 { v0.b }[2], [x9]
; CHECK-GI-NEXT: cnt v0.8b, v0.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x0]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
index d5b9d17..c3322ca 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -208,13 +208,8 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(ptr %
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ptrue p1.d, vl8
-; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
-; CHECK-NEXT: str z0, [sp]
-; CHECK-NEXT: ld1w { z0.d }, p1/z, [x1]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ptrue p0.d, vl8
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x1]
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/load.ll b/llvm/test/CodeGen/AArch64/load.ll
index 3fa5d64..6b26ae98 100644
--- a/llvm/test/CodeGen/AArch64/load.ll
+++ b/llvm/test/CodeGen/AArch64/load.ll
@@ -353,19 +353,19 @@ define <7 x i8> @load_v7i8(ptr %ptr) {
; CHECK-GI-LABEL: load_v7i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr b0, [x0]
-; CHECK-GI-NEXT: ldr b1, [x0, #1]
+; CHECK-GI-NEXT: add x8, x0, #1
; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.b[2], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #3]
-; CHECK-GI-NEXT: mov v0.b[3], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #4]
-; CHECK-GI-NEXT: mov v0.b[4], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #5]
-; CHECK-GI-NEXT: mov v0.b[5], v1.b[0]
-; CHECK-GI-NEXT: ldr b1, [x0, #6]
-; CHECK-GI-NEXT: mov v0.b[6], v1.b[0]
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: add x8, x0, #2
+; CHECK-GI-NEXT: ld1 { v0.b }[2], [x8]
+; CHECK-GI-NEXT: add x8, x0, #3
+; CHECK-GI-NEXT: ld1 { v0.b }[3], [x8]
+; CHECK-GI-NEXT: add x8, x0, #4
+; CHECK-GI-NEXT: ld1 { v0.b }[4], [x8]
+; CHECK-GI-NEXT: add x8, x0, #5
+; CHECK-GI-NEXT: ld1 { v0.b }[5], [x8]
+; CHECK-GI-NEXT: add x8, x0, #6
+; CHECK-GI-NEXT: ld1 { v0.b }[6], [x8]
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
%a = load <7 x i8>, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll b/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll
new file mode 100644
index 0000000..e0f2155
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=LR,FP,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=X30,X29,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
+
+; LR, FP, X30 and X29 should be correctly recognized and not used.
+
+define void @foo(i64 %v1, i64 %v2, ptr %ptr) {
+; CHECK-LABEL: foo:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: add x3, x0, x1
+; CHECK-NEXT: str x3, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: str x3, [x2, #8]
+; CHECK-NEXT: ldr x3, [x2, #16]
+; CHECK-NEXT: add x3, x0, x3
+; CHECK-NEXT: sub x3, x3, x1
+; CHECK-NEXT: str x3, [x2, #16]
+; CHECK-NEXT: ldr x3, [sp, #8] // 8-byte Folded Reload
+; CHECK-NEXT: str x3, [x2, #24]
+; CHECK-NEXT: str x0, [x2, #32]
+; CHECK-NEXT: str x1, [x2, #40]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+ %v3 = add i64 %v1, %v2
+ %p1 = getelementptr i64, ptr %ptr, i64 1
+ store volatile i64 %v3, ptr %p1, align 8
+
+ %p2 = getelementptr i64, ptr %ptr, i64 2
+ %v4 = load volatile i64, ptr %p2, align 8
+ %v5 = add i64 %v1, %v4
+ %v6 = sub i64 %v5, %v2
+ store volatile i64 %v6, ptr %p2, align 8
+
+ %p3 = getelementptr i64, ptr %ptr, i64 3
+ store volatile i64 %v3, ptr %p3, align 8
+
+ %p4 = getelementptr i64, ptr %ptr, i64 4
+ store volatile i64 %v1, ptr %p4, align 8
+ %p5 = getelementptr i64, ptr %ptr, i64 5
+ store volatile i64 %v2, ptr %p5, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/reserveXreg.ll b/llvm/test/CodeGen/AArch64/reserveXreg.ll
index e0f2155..86ed536 100644
--- a/llvm/test/CodeGen/AArch64/reserveXreg.ll
+++ b/llvm/test/CodeGen/AArch64/reserveXreg.ll
@@ -1,43 +1,303 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=LR,FP,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=X30,X29,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
-
-; LR, FP, X30 and X29 should be correctly recognized and not used.
-
-define void @foo(i64 %v1, i64 %v2, ptr %ptr) {
-; CHECK-LABEL: foo:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add x3, x0, x1
-; CHECK-NEXT: str x3, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: str x3, [x2, #8]
-; CHECK-NEXT: ldr x3, [x2, #16]
-; CHECK-NEXT: add x3, x0, x3
-; CHECK-NEXT: sub x3, x3, x1
-; CHECK-NEXT: str x3, [x2, #16]
-; CHECK-NEXT: ldr x3, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: str x3, [x2, #24]
-; CHECK-NEXT: str x0, [x2, #32]
-; CHECK-NEXT: str x1, [x2, #40]
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
- %v3 = add i64 %v1, %v2
- %p1 = getelementptr i64, ptr %ptr, i64 1
- store volatile i64 %v3, ptr %p1, align 8
-
- %p2 = getelementptr i64, ptr %ptr, i64 2
- %v4 = load volatile i64, ptr %p2, align 8
- %v5 = add i64 %v1, %v4
- %v6 = sub i64 %v5, %v2
- store volatile i64 %v6, ptr %p2, align 8
-
- %p3 = getelementptr i64, ptr %ptr, i64 3
- store volatile i64 %v3, ptr %p3, align 8
-
- %p4 = getelementptr i64, ptr %ptr, i64 4
- store volatile i64 %v1, ptr %p4, align 8
- %p5 = getelementptr i64, ptr %ptr, i64 5
- store volatile i64 %v2, ptr %p5, align 8
+;; Check if manually reserved registers are always excluded from being saved by
+;; the function prolog/epilog, even for callee-saved ones, as per GCC behavior.
+;; X19(BP, LLVM specific), X29(FP), X30(LP), X31(SP) are special so
+;; they are not checked.
+
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s
+
+define preserve_mostcc void @t1() "target-features"="+reserve-x1" {
+; CHECK-LABEL: t1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w1, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x1},{x1}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t2() "target-features"="+reserve-x2" {
+; CHECK-LABEL: t2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w2, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x2},{x2}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t3() "target-features"="+reserve-x3" {
+; CHECK-LABEL: t3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w3, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x3},{x3}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t4() "target-features"="+reserve-x4" {
+; CHECK-LABEL: t4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w4, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x4},{x4}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t5() "target-features"="+reserve-x5" {
+; CHECK-LABEL: t5:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w5, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x5},{x5}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t6() "target-features"="+reserve-x6" {
+; CHECK-LABEL: t6:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w6, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x6},{x6}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t7() "target-features"="+reserve-x7" {
+; CHECK-LABEL: t7:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w7, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x7},{x7}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t8() "target-features"="+reserve-x8" {
+; CHECK-LABEL: t8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x8},{x8}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t9() "target-features"="+reserve-x9" {
+; CHECK-LABEL: t9:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w9, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x9},{x9}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t10() "target-features"="+reserve-x10" {
+; CHECK-LABEL: t10:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w10, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x10},{x10}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t11() "target-features"="+reserve-x11" {
+; CHECK-LABEL: t11:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w11, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x11},{x11}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t12() "target-features"="+reserve-x12" {
+; CHECK-LABEL: t12:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w12, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x12},{x12}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t13() "target-features"="+reserve-x13" {
+; CHECK-LABEL: t13:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w13, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x13},{x13}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t14() "target-features"="+reserve-x14" {
+; CHECK-LABEL: t14:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w14, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x14},{x14}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t15() "target-features"="+reserve-x15" {
+; CHECK-LABEL: t15:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w15, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x15},{x15}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t16() "target-features"="+reserve-x16" {
+; CHECK-LABEL: t16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w16, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x16},{x16}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t17() "target-features"="+reserve-x17" {
+; CHECK-LABEL: t17:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w17, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x17},{x17}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t18() "target-features"="+reserve-x18" {
+; CHECK-LABEL: t18:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w18, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x18},{x18}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t20() "target-features"="+reserve-x20" {
+; CHECK-LABEL: t20:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w20, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x20},{x20}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t21() "target-features"="+reserve-x21" {
+; CHECK-LABEL: t21:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w21, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x21},{x21}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t22() "target-features"="+reserve-x22" {
+; CHECK-LABEL: t22:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w22, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x22},{x22}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t23() "target-features"="+reserve-x23" {
+; CHECK-LABEL: t23:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w23, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x23},{x23}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t24() "target-features"="+reserve-x24" {
+; CHECK-LABEL: t24:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w24, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x24},{x24}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t25() "target-features"="+reserve-x25" {
+; CHECK-LABEL: t25:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w25, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x25},{x25}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t26() "target-features"="+reserve-x26" {
+; CHECK-LABEL: t26:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w26, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x26},{x26}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t27() "target-features"="+reserve-x27" {
+; CHECK-LABEL: t27:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w27, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x27},{x27}"(i64 256)
+ ret void
+}
+
+define preserve_mostcc void @t28() "target-features"="+reserve-x28" {
+; CHECK-LABEL: t28:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w28, #256
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: ret
+ call i64 asm sideeffect "", "={x28},{x28}"(i64 256)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index d54dde3..e1018bb 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -209,13 +209,11 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr b0, [x0]
; CHECK-GI-NEXT: ldr b1, [x1]
+; CHECK-GI-NEXT: add x8, x0, #1
+; CHECK-GI-NEXT: add x9, x1, #1
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: ld1 { v1.b }[1], [x9]
; CHECK-GI-NEXT: add x8, x2, #1
-; CHECK-GI-NEXT: ldr b2, [x0, #1]
-; CHECK-GI-NEXT: ldr b3, [x1, #1]
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v1.b[0], v1.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
; CHECK-GI-NEXT: sqadd v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x2]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
diff --git a/llvm/test/CodeGen/AArch64/setcc-fsh.ll b/llvm/test/CodeGen/AArch64/setcc-fsh.ll
index 08bfe28..472723b 100644
--- a/llvm/test/CodeGen/AArch64/setcc-fsh.ll
+++ b/llvm/test/CodeGen/AArch64/setcc-fsh.ll
@@ -248,3 +248,27 @@ define i1 @fshl_or_ne_2(i32 %x, i32 %y) {
%r = icmp ne i32 %f, 2
ret i1 %r
}
+
+define i1 @fshr_0_or_eq_0(i16 %x, i16 %y) {
+; CHECK-LABEL: fshr_0_or_eq_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: tst w0, #0xffff
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %or = or i16 %x, %y
+ %f = call i16 @llvm.fshr.i16(i16 %or, i16 %x, i16 0)
+ %r = icmp eq i16 %f, 0
+ ret i1 %r
+}
+
+define i1 @fshr_32_or_eq_0(i16 %x, i16 %y) {
+; CHECK-LABEL: fshr_32_or_eq_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: tst w0, #0xffff
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %or = or i16 %x, %y
+ %f = call i16 @llvm.fshr.i16(i16 %or, i16 %x, i16 32)
+ %r = icmp eq i16 %f, 0
+ ret i1 %r
+}
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
index 4c8f0c9..e5c07e0 100644
--- a/llvm/test/CodeGen/AArch64/shufflevector.ll
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -208,14 +208,13 @@ define <2 x i1> @shufflevector_v2i1(<2 x i1> %a, <2 x i1> %b){
;
; CHECK-GI-LABEL: shufflevector_v2i1:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov w8, v1.s[1]
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov w9, v0.s[1]
-; CHECK-GI-NEXT: mov v1.b[1], w8
-; CHECK-GI-NEXT: mov v0.b[1], w9
-; CHECK-GI-NEXT: mov b1, v1.b[1]
-; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov w8, v0.s[1]
+; CHECK-GI-NEXT: mov w9, v1.s[1]
+; CHECK-GI-NEXT: mov v0.b[1], w8
+; CHECK-GI-NEXT: mov v1.b[1], w9
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[1]
; CHECK-GI-NEXT: umov w8, v0.b[0]
; CHECK-GI-NEXT: umov w9, v0.b[1]
; CHECK-GI-NEXT: mov v0.s[0], w8
diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index dc39ad0..085857c 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -210,13 +210,11 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr b0, [x0]
; CHECK-GI-NEXT: ldr b1, [x1]
+; CHECK-GI-NEXT: add x8, x0, #1
+; CHECK-GI-NEXT: add x9, x1, #1
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: ld1 { v1.b }[1], [x9]
; CHECK-GI-NEXT: add x8, x2, #1
-; CHECK-GI-NEXT: ldr b2, [x0, #1]
-; CHECK-GI-NEXT: ldr b3, [x1, #1]
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v1.b[0], v1.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
; CHECK-GI-NEXT: sqsub v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x2]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index 965af2a..e103137 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -331,8 +331,7 @@ define void @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, ptr %p) nounwi
; CHECK-LABEL: extract_fixed_v4i64_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z0.b, #32
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%retval = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> %vec, i64 4)
store <4 x i64> %retval, ptr %p
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
index eaa9923..ac4c387 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
@@ -18,7 +18,7 @@ define void @st1d_fixed(ptr %ptr) #0 {
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld2d { z0.d, z1.d }, p0/z, [x20]
; CHECK-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload
-; CHECK-NEXT: st1d { z0.d }, p0, [x19]
+; CHECK-NEXT: str z0, [x19]
; CHECK-NEXT: ldp x20, x19, [sp, #144] // 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #160
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
index 55f70b2..00002dd 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
@@ -544,11 +544,10 @@ define void @extract_subvector_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
define void @extract_subvector_legalization_v8i32() vscale_range(2,2) #0 {
; CHECK-LABEL: extract_subvector_legalization_v8i32:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: adrp x8, .LCPI40_0
; CHECK-NEXT: add x8, x8, :lo12:.LCPI40_0
; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8]
+; CHECK-NEXT: ldr z0, [x8]
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #16
; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
index 25876f0..da1aa4c 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-convert.ll
@@ -7,12 +7,12 @@ target triple = "aarch64-unknown-linux-gnu"
define void @fp_convert_combine_crash(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fp_convert_combine_crash:
; CHECK: // %bb.0:
+; CHECK-NEXT: fmov z0.s, #8.00000000
+; CHECK-NEXT: ldr z1, [x0]
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: fmov z1.s, #8.00000000
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: fmul z0.s, z0.s, z1.s
+; CHECK-NEXT: fmul z0.s, z1.s, z0.s
; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%f = load <8 x float>, ptr %a
%mul.i = fmul <8 x float> %f, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00,
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
index 1bd688d..72686c3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
@@ -20,32 +20,31 @@ define dso_local void @func1(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w22, -32
; CHECK-NEXT: .cfi_offset w29, -48
-; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x10, sp, #176
; CHECK-NEXT: add x8, sp, #48
; CHECK-NEXT: add x9, sp, #144
-; CHECK-NEXT: add x20, sp, #176
-; CHECK-NEXT: ldr x15, [sp, #104]
-; CHECK-NEXT: ld1d { z3.d }, p0/z, [x10]
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
+; CHECK-NEXT: ldr z3, [x10]
+; CHECK-NEXT: ldr z0, [x8]
; CHECK-NEXT: add x8, sp, #112
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x9]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x8]
-; CHECK-NEXT: ldur q4, [sp, #88]
+; CHECK-NEXT: ldr z2, [x9]
+; CHECK-NEXT: ldr z1, [x8]
+; CHECK-NEXT: add x20, sp, #176
; CHECK-NEXT: ldp x9, x8, [sp, #328]
-; CHECK-NEXT: ldr x19, [sp, #272]
+; CHECK-NEXT: ldr x15, [sp, #104]
; CHECK-NEXT: ldp x11, x10, [sp, #312]
+; CHECK-NEXT: ldur q4, [sp, #88]
; CHECK-NEXT: ldp x13, x12, [sp, #296]
+; CHECK-NEXT: ldr x19, [sp, #272]
; CHECK-NEXT: ldp x18, x14, [sp, #280]
; CHECK-NEXT: ldp x16, x17, [sp, #208]
; CHECK-NEXT: ldp x21, x22, [sp, #352]
-; CHECK-NEXT: st1d { z3.d }, p0, [x20]
+; CHECK-NEXT: str z3, [x20]
; CHECK-NEXT: add x20, sp, #144
-; CHECK-NEXT: st1d { z2.d }, p0, [x20]
+; CHECK-NEXT: str z2, [x20]
; CHECK-NEXT: add x20, sp, #112
-; CHECK-NEXT: st1d { z1.d }, p0, [x20]
+; CHECK-NEXT: str z1, [x20]
; CHECK-NEXT: add x20, sp, #48
-; CHECK-NEXT: st1d { z0.d }, p0, [x20]
+; CHECK-NEXT: str z0, [x20]
; CHECK-NEXT: stp x21, x22, [sp, #352]
; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: stp x19, x18, [sp, #272]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
index 281ffff..2f76be6 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -debug-only=isel < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
@@ -9,16 +9,15 @@ target triple = "aarch64-unknown-linux-gnu"
; accessing fixed width objects.
define void @foo(ptr %a) #0 {
; CHECK-LABEL: foo:
-; CHECK: SelectionDAG has 15 nodes:
+; CHECK: SelectionDAG has 13 nodes:
; CHECK-NEXT: t0: ch,glue = EntryToken
-; CHECK-NEXT: t12: nxv2i1 = PTRUE_D TargetConstant:i32<31>
; CHECK-NEXT: t2: i64,ch = CopyFromReg t0, Register:i64 %0
-; CHECK-NEXT: t18: nxv2i64,ch = LD1D_IMM<Mem:(volatile load (s512) from %ir.a)> t12, t2, TargetConstant:i64<0>, t0
+; CHECK-NEXT: t21: nxv2i64,ch = LDR_ZXI<Mem:(volatile load (<vscale x 1 x s128>) from %ir.a, align 64)> t2, TargetConstant:i64<0>, t0
; CHECK-NEXT: t8: i64 = ADDXri TargetFrameIndex:i64<1>, TargetConstant:i32<0>, TargetConstant:i32<0>
; CHECK-NEXT: t6: i64 = ADDXri TargetFrameIndex:i64<0>, TargetConstant:i32<0>, TargetConstant:i32<0>
-; CHECK-NEXT: t17: ch = ST1D_IMM<Mem:(volatile store (s512) into %ir.r0)> t18, t12, t6, TargetConstant:i64<0>, t18:1
-; CHECK-NEXT: t16: ch = ST1D_IMM<Mem:(volatile store (s512) into %ir.r1)> t18, t12, t8, TargetConstant:i64<0>, t17
-; CHECK-NEXT: t10: ch = RET_ReallyLR t16
+; CHECK-NEXT: t22: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r0, align 64)> t21, t6, TargetConstant:i64<0>, t21:1
+; CHECK-NEXT: t23: ch = STR_ZXI<Mem:(volatile store (<vscale x 1 x s128>) into %ir.r1, align 64)> t21, t8, TargetConstant:i64<0>, t22
+; CHECK-NEXT: t10: ch = RET_ReallyLR t23
; CHECK-EMPTY:
entry:
%r0 = alloca <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index d7b67d7..7b82c0a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -380,11 +380,10 @@ define void @v8i32(ptr %ldptr, ptr %stptr) {
;
; CHECK-256-LABEL: v8i32:
; CHECK-256: // %bb.0:
-; CHECK-256-NEXT: ptrue p0.s
-; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, #2, mul vl]
-; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
-; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, #2, mul vl]
-; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, #1, mul vl]
+; CHECK-256-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-256-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-256-NEXT: str z1, [x1, #1, mul vl]
; CHECK-256-NEXT: ret
;
; CHECK-512-LABEL: v8i32:
@@ -437,8 +436,7 @@ define void @v8i32_vscale(ptr %0) {
; CHECK-256-LABEL: v8i32_vscale:
; CHECK-256: // %bb.0:
; CHECK-256-NEXT: mov z0.s, #1 // =0x1
-; CHECK-256-NEXT: ptrue p0.s
-; CHECK-256-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
+; CHECK-256-NEXT: str z0, [x0, #2, mul vl]
; CHECK-256-NEXT: ret
;
; CHECK-512-LABEL: v8i32_vscale:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
index 1512f54..d5aad76 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
@@ -6,11 +6,10 @@ target triple = "aarch64-unknown-linux-gnu"
define void @add_v64i8(ptr %a, ptr %b) #0 {
; CHECK-LABEL: add_v64i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
-; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: add z0.b, z0.b, z1.b
-; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <64 x i8>, ptr %a
%op2 = load <64 x i8>, ptr %b
@@ -22,11 +21,10 @@ define void @add_v64i8(ptr %a, ptr %b) #0 {
define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-LABEL: add_v32i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: add z0.h, z0.h, z1.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <32 x i16>, ptr %a
%op2 = load <32 x i16>, ptr %b
@@ -38,10 +36,10 @@ define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
define void @abs_v16i32(ptr %a) #0 {
; CHECK-LABEL: abs_v16i32:
; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: abs z0.s, p0/m, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <16 x i32>, ptr %a
%res = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %op1, i1 false)
@@ -52,10 +50,10 @@ define void @abs_v16i32(ptr %a) #0 {
define void @abs_v8i64(ptr %a) #0 {
; CHECK-LABEL: abs_v8i64:
; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: abs z0.d, p0/m, z0.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <8 x i64>, ptr %a
%res = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %op1, i1 false)
@@ -66,11 +64,10 @@ define void @abs_v8i64(ptr %a) #0 {
define void @fadd_v32f16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v32f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: fadd z0.h, z0.h, z1.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <32 x half>, ptr %a
%op2 = load <32 x half>, ptr %b
@@ -82,11 +79,10 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
define void @fadd_v16f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v16f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: fadd z0.s, z0.s, z1.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <16 x float>, ptr %a
%op2 = load <16 x float>, ptr %b
@@ -98,11 +94,10 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
define void @fadd_v8f64(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v8f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: fadd z0.d, z0.d, z1.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <8 x double>, ptr %a
%op2 = load <8 x double>, ptr %b
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
index 0d0b5cb..0cda4d9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
@@ -194,13 +194,12 @@ define void @test_revhv32i16(ptr %a) #0 {
define void @test_rev_elts_fail(ptr %a) #1 {
; CHECK-LABEL: test_rev_elts_fail:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: adrp x8, .LCPI11_0
; CHECK-NEXT: add x8, x8, :lo12:.LCPI11_0
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x8]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x8]
; CHECK-NEXT: tbl z0.d, { z0.d }, z1.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x i64>, ptr %a
%tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -275,10 +274,9 @@ define void @test_revv8i32(ptr %a) #0 {
define void @test_revv32i8_vl256(ptr %a) #1 {
; CHECK-LABEL: test_revv32i8_vl256:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: rev z0.b, z0.b
-; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
%tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -290,10 +288,9 @@ define void @test_revv32i8_vl256(ptr %a) #1 {
define void @test_revv16i16_vl256(ptr %a) #1 {
; CHECK-LABEL: test_revv16i16_vl256:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: rev z0.h, z0.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
%tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 poison, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -305,10 +302,9 @@ define void @test_revv16i16_vl256(ptr %a) #1 {
define void @test_revv8f32_vl256(ptr %a) #1 {
; CHECK-LABEL: test_revv8f32_vl256:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: rev z0.s, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
%tmp2 = shufflevector <8 x float> %tmp1, <8 x float> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -320,10 +316,9 @@ define void @test_revv8f32_vl256(ptr %a) #1 {
define void @test_revv4f64_vl256(ptr %a) #1 {
; CHECK-LABEL: test_revv4f64_vl256:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: rev z0.d, z0.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %a
%tmp2 = shufflevector <4 x double> %tmp1, <4 x double> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -335,10 +330,9 @@ define void @test_revv4f64_vl256(ptr %a) #1 {
define void @test_revv8i32v8i32(ptr %a, ptr %b) #1 {
; CHECK-LABEL: test_revv8i32v8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x1]
; CHECK-NEXT: rev z0.s, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
%tmp2 = load <8 x i32>, ptr %b
@@ -351,13 +345,12 @@ define void @test_revv8i32v8i32(ptr %a, ptr %b) #1 {
define void @test_rev_fail(ptr %a) #1 {
; CHECK-LABEL: test_rev_fail:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: adrp x8, .LCPI20_0
; CHECK-NEXT: add x8, x8, :lo12:.LCPI20_0
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x8]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x8]
; CHECK-NEXT: tbl z0.h, { z0.h }, z1.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
%tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
@@ -379,7 +372,6 @@ define void @test_revv8i16v8i16(ptr %a, ptr %b, ptr %c) #1 {
; CHECK-NEXT: ldr q0, [x1]
; CHECK-NEXT: ldr q5, [x0]
; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov h1, v0.h[4]
; CHECK-NEXT: mov h2, v0.h[5]
; CHECK-NEXT: mov h3, v0.h[6]
@@ -409,8 +401,8 @@ define void @test_revv8i16v8i16(ptr %a, ptr %b, ptr %c) #1 {
; CHECK-NEXT: str h0, [sp, #10]
; CHECK-NEXT: str h1, [sp, #8]
; CHECK-NEXT: str h2, [sp, #4]
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT: st1h { z0.h }, p0, [x2]
+; CHECK-NEXT: ldr z0, [x8]
+; CHECK-NEXT: str z0, [x2]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index 2514383..24c5dcc 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -7,11 +7,10 @@ target triple = "aarch64-unknown-linux-gnu"
define void @zip1_v32i8(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: zip1_v32i8:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.b
-; VBITS_EQ_256-NEXT: ld1b { z0.b }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: zip1 z0.b, z0.b, z1.b
-; VBITS_EQ_256-NEXT: st1b { z0.b }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: zip1_v32i8:
@@ -32,30 +31,28 @@ define void @zip1_v32i8(ptr %a, ptr %b) #0 {
define void @zip_v32i16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: zip_v32i16:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.h
-; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
-; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x1, #1, mul vl]
-; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0, #1, mul vl]
+; VBITS_EQ_256-NEXT: ldr z1, [x0]
+; VBITS_EQ_256-NEXT: ldr z2, [x1, #1, mul vl]
+; VBITS_EQ_256-NEXT: ldr z3, [x1]
; VBITS_EQ_256-NEXT: zip1 z5.h, z0.h, z2.h
; VBITS_EQ_256-NEXT: zip2 z0.h, z0.h, z2.h
; VBITS_EQ_256-NEXT: zip1 z4.h, z1.h, z3.h
; VBITS_EQ_256-NEXT: zip2 z1.h, z1.h, z3.h
; VBITS_EQ_256-NEXT: add z2.h, z4.h, z5.h
; VBITS_EQ_256-NEXT: add z0.h, z1.h, z0.h
-; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
-; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, #1, mul vl]
+; VBITS_EQ_256-NEXT: str z2, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0, #1, mul vl]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: zip_v32i16:
; VBITS_EQ_512: // %bb.0:
-; VBITS_EQ_512-NEXT: ptrue p0.h
-; VBITS_EQ_512-NEXT: ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_512-NEXT: ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_512-NEXT: ldr z0, [x0]
+; VBITS_EQ_512-NEXT: ldr z1, [x1]
; VBITS_EQ_512-NEXT: zip1 z2.h, z0.h, z1.h
; VBITS_EQ_512-NEXT: zip2 z0.h, z0.h, z1.h
; VBITS_EQ_512-NEXT: add z0.h, z2.h, z0.h
-; VBITS_EQ_512-NEXT: st1h { z0.h }, p0, [x0]
+; VBITS_EQ_512-NEXT: str z0, [x0]
; VBITS_EQ_512-NEXT: ret
%tmp1 = load <32 x i16>, ptr %a
%tmp2 = load <32 x i16>, ptr %b
@@ -69,11 +66,10 @@ define void @zip_v32i16(ptr %a, ptr %b) #0 {
define void @zip1_v16i16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: zip1_v16i16:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.h
-; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: zip1 z0.h, z0.h, z1.h
-; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: zip1_v16i16:
@@ -94,11 +90,10 @@ define void @zip1_v16i16(ptr %a, ptr %b) #0 {
define void @zip1_v8i32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: zip1_v8i32:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.s
-; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: zip1 z0.s, z0.s, z1.s
-; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: zip1_v8i32:
@@ -119,13 +114,12 @@ define void @zip1_v8i32(ptr %a, ptr %b) #0 {
define void @zip_v4f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: zip_v4f64:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.d
-; VBITS_EQ_256-NEXT: ld1d { z0.d }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1d { z1.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: zip1 z2.d, z0.d, z1.d
; VBITS_EQ_256-NEXT: zip2 z0.d, z0.d, z1.d
; VBITS_EQ_256-NEXT: fadd z0.d, z2.d, z0.d
-; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: zip_v4f64:
@@ -186,10 +180,9 @@ define void @zip_v4i32(ptr %a, ptr %b) #0 {
define void @zip1_v8i32_undef(ptr %a) #0 {
; VBITS_EQ_256-LABEL: zip1_v8i32_undef:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.s
-; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
; VBITS_EQ_256-NEXT: zip1 z0.s, z0.s, z0.s
-; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: zip1_v8i32_undef:
@@ -208,13 +201,12 @@ define void @zip1_v8i32_undef(ptr %a) #0 {
define void @trn_v32i8(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: trn_v32i8:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.b
-; VBITS_EQ_256-NEXT: ld1b { z0.b }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: trn1 z2.b, z0.b, z1.b
; VBITS_EQ_256-NEXT: trn2 z0.b, z0.b, z1.b
; VBITS_EQ_256-NEXT: add z0.b, z2.b, z0.b
-; VBITS_EQ_256-NEXT: st1b { z0.b }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: trn_v32i8:
@@ -239,30 +231,28 @@ define void @trn_v32i8(ptr %a, ptr %b) #0 {
define void @trn_v32i16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: trn_v32i16:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.h
-; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0, #1, mul vl]
-; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1, #1, mul vl]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
+; VBITS_EQ_256-NEXT: ldr z2, [x0, #1, mul vl]
+; VBITS_EQ_256-NEXT: ldr z3, [x1, #1, mul vl]
; VBITS_EQ_256-NEXT: trn1 z4.h, z0.h, z1.h
; VBITS_EQ_256-NEXT: trn2 z0.h, z0.h, z1.h
; VBITS_EQ_256-NEXT: trn1 z1.h, z2.h, z3.h
; VBITS_EQ_256-NEXT: trn2 z2.h, z2.h, z3.h
; VBITS_EQ_256-NEXT: add z0.h, z4.h, z0.h
; VBITS_EQ_256-NEXT: add z1.h, z1.h, z2.h
-; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0, #1, mul vl]
+; VBITS_EQ_256-NEXT: str z0, [x0]
+; VBITS_EQ_256-NEXT: str z1, [x0, #1, mul vl]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: trn_v32i16:
; VBITS_EQ_512: // %bb.0:
-; VBITS_EQ_512-NEXT: ptrue p0.h
-; VBITS_EQ_512-NEXT: ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_512-NEXT: ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_512-NEXT: ldr z0, [x0]
+; VBITS_EQ_512-NEXT: ldr z1, [x1]
; VBITS_EQ_512-NEXT: trn1 z2.h, z0.h, z1.h
; VBITS_EQ_512-NEXT: trn2 z0.h, z0.h, z1.h
; VBITS_EQ_512-NEXT: add z0.h, z2.h, z0.h
-; VBITS_EQ_512-NEXT: st1h { z0.h }, p0, [x0]
+; VBITS_EQ_512-NEXT: str z0, [x0]
; VBITS_EQ_512-NEXT: ret
%tmp1 = load <32 x i16>, ptr %a
%tmp2 = load <32 x i16>, ptr %b
@@ -276,13 +266,12 @@ define void @trn_v32i16(ptr %a, ptr %b) #0 {
define void @trn_v16i16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: trn_v16i16:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.h
-; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: trn1 z2.h, z0.h, z1.h
; VBITS_EQ_256-NEXT: trn2 z0.h, z0.h, z1.h
; VBITS_EQ_256-NEXT: add z0.h, z2.h, z0.h
-; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: trn_v16i16:
@@ -307,13 +296,12 @@ define void @trn_v16i16(ptr %a, ptr %b) #0 {
define void @trn_v8i32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: trn_v8i32:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.s
-; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: trn1 z2.s, z0.s, z1.s
; VBITS_EQ_256-NEXT: trn2 z0.s, z0.s, z1.s
; VBITS_EQ_256-NEXT: add z0.s, z2.s, z0.s
-; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: trn_v8i32:
@@ -338,13 +326,12 @@ define void @trn_v8i32(ptr %a, ptr %b) #0 {
define void @trn_v4f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-LABEL: trn_v4f64:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.d
-; VBITS_EQ_256-NEXT: ld1d { z0.d }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1d { z1.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
+; VBITS_EQ_256-NEXT: ldr z1, [x1]
; VBITS_EQ_256-NEXT: trn1 z2.d, z0.d, z1.d
; VBITS_EQ_256-NEXT: trn2 z0.d, z0.d, z1.d
; VBITS_EQ_256-NEXT: fadd z0.d, z2.d, z0.d
-; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: trn_v4f64:
@@ -389,12 +376,11 @@ define void @trn_v4f32(ptr %a, ptr %b) #0 {
define void @trn_v8i32_undef(ptr %a) #0 {
; VBITS_EQ_256-LABEL: trn_v8i32_undef:
; VBITS_EQ_256: // %bb.0:
-; VBITS_EQ_256-NEXT: ptrue p0.s
-; VBITS_EQ_256-NEXT: ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ldr z0, [x0]
; VBITS_EQ_256-NEXT: trn1 z1.s, z0.s, z0.s
; VBITS_EQ_256-NEXT: trn2 z0.s, z0.s, z0.s
; VBITS_EQ_256-NEXT: add z0.s, z1.s, z0.s
-; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: str z0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_EQ_512-LABEL: trn_v8i32_undef:
@@ -419,11 +405,10 @@ define void @trn_v8i32_undef(ptr %a) #0 {
define void @zip2_v32i8(ptr %a, ptr %b) #1 {
; CHECK-LABEL: zip2_v32i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
-; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: zip2 z0.b, z0.b, z1.b
-; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load volatile <32 x i8>, ptr %a
%tmp2 = load volatile <32 x i8>, ptr %b
@@ -437,11 +422,10 @@ define void @zip2_v32i8(ptr %a, ptr %b) #1 {
define void @zip2_v16i16(ptr %a, ptr %b) #1 {
; CHECK-LABEL: zip2_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: zip2 z0.h, z0.h, z1.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load volatile <16 x i16>, ptr %a
%tmp2 = load volatile <16 x i16>, ptr %b
@@ -455,11 +439,10 @@ define void @zip2_v16i16(ptr %a, ptr %b) #1 {
define void @zip2_v8i32(ptr %a, ptr %b) #1 {
; CHECK-LABEL: zip2_v8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: zip2 z0.s, z0.s, z1.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load volatile <8 x i32>, ptr %a
%tmp2 = load volatile <8 x i32>, ptr %b
@@ -472,10 +455,9 @@ define void @zip2_v8i32(ptr %a, ptr %b) #1 {
define void @zip2_v8i32_undef(ptr %a) #1 {
; CHECK-LABEL: zip2_v8i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: zip2 z0.s, z0.s, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load volatile <8 x i32>, ptr %a
%tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
@@ -488,13 +470,12 @@ define void @zip2_v8i32_undef(ptr %a) #1 {
define void @uzp_v32i8(ptr %a, ptr %b) #1 {
; CHECK-LABEL: uzp_v32i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
-; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: uzp1 z2.b, z0.b, z1.b
; CHECK-NEXT: uzp2 z0.b, z0.b, z1.b
; CHECK-NEXT: add z0.b, z2.b, z0.b
-; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
%tmp2 = load <32 x i8>, ptr %b
@@ -511,19 +492,18 @@ define void @uzp_v32i8(ptr %a, ptr %b) #1 {
define void @uzp_v32i16(ptr %a, ptr %b) #1 {
; CHECK-LABEL: uzp_v32i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z2.h }, p0/z, [x1, #1, mul vl]
-; CHECK-NEXT: ld1h { z3.h }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: ldr z2, [x1, #1, mul vl]
+; CHECK-NEXT: ldr z3, [x1]
; CHECK-NEXT: uzp1 z4.h, z1.h, z0.h
; CHECK-NEXT: uzp2 z0.h, z1.h, z0.h
; CHECK-NEXT: uzp1 z1.h, z3.h, z2.h
; CHECK-NEXT: uzp2 z2.h, z3.h, z2.h
; CHECK-NEXT: add z0.h, z4.h, z0.h
; CHECK-NEXT: add z1.h, z1.h, z2.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
-; CHECK-NEXT: st1h { z1.h }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: str z0, [x0]
+; CHECK-NEXT: str z1, [x0, #1, mul vl]
; CHECK-NEXT: ret
%tmp1 = load <32 x i16>, ptr %a
%tmp2 = load <32 x i16>, ptr %b
@@ -539,13 +519,12 @@ define void @uzp_v32i16(ptr %a, ptr %b) #1 {
define void @uzp_v16i16(ptr %a, ptr %b) #1 {
; CHECK-LABEL: uzp_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: uzp1 z2.h, z0.h, z1.h
; CHECK-NEXT: uzp2 z0.h, z0.h, z1.h
; CHECK-NEXT: add z0.h, z2.h, z0.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
%tmp2 = load <16 x i16>, ptr %b
@@ -561,13 +540,12 @@ define void @uzp_v16i16(ptr %a, ptr %b) #1 {
define void @uzp_v8f32(ptr %a, ptr %b) #1 {
; CHECK-LABEL: uzp_v8f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: uzp1 z2.s, z0.s, z1.s
; CHECK-NEXT: uzp2 z0.s, z0.s, z1.s
; CHECK-NEXT: fadd z0.s, z2.s, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
%tmp2 = load <8 x float>, ptr %b
@@ -583,13 +561,12 @@ define void @uzp_v8f32(ptr %a, ptr %b) #1 {
define void @uzp_v4i64(ptr %a, ptr %b) #1 {
; CHECK-LABEL: uzp_v4i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
; CHECK-NEXT: uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT: add z0.d, z2.d, z0.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x i64>, ptr %a
%tmp2 = load <4 x i64>, ptr %b
@@ -624,12 +601,11 @@ define void @uzp_v8i16(ptr %a, ptr %b) #1 {
define void @uzp_v8i32_undef(ptr %a) #1 {
; CHECK-LABEL: uzp_v8i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: uzp1 z1.s, z0.s, z0.s
; CHECK-NEXT: uzp2 z0.s, z0.s, z0.s
; CHECK-NEXT: add z0.s, z1.s, z0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
%tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
index 4d8855c..23ae5f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
@@ -49,8 +49,8 @@ define i1 @ptest_v16i1_512bit_min_sve(ptr %a, ptr %b) vscale_range(4, 0) {
define i1 @ptest_v16i1_512bit_sve(ptr %a, ptr %b) vscale_range(4, 4) {
; CHECK-LABEL: ptest_v16i1_512bit_sve:
; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: fcmne p0.s, p0/z, z0.s, #0.0
; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -99,11 +99,11 @@ declare i1 @llvm.vector.reduce.or.i1.v16i1(<16 x i1>)
define i1 @ptest_and_v16i1_512bit_sve(ptr %a, ptr %b) vscale_range(4, 4) {
; CHECK-LABEL: ptest_and_v16i1_512bit_sve:
; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: fcmne p1.s, p0/z, z0.s, #0.0
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1]
-; CHECK-NEXT: fcmne p0.s, p1/z, z0.s, #0.0
+; CHECK-NEXT: fcmne p0.s, p0/z, z0.s, #0.0
+; CHECK-NEXT: ldr z0, [x1]
+; CHECK-NEXT: fcmne p0.s, p0/z, z0.s, #0.0
; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index b24a951..d916f26 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -64,7 +64,7 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
; CHECK-NEXT: lsl z2.s, z2.s, #31
; CHECK-NEXT: asr z0.s, z0.s, #31
; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, #0
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z1, [x0]
; CHECK-NEXT: lsl z3.s, z3.s, #31
; CHECK-NEXT: asr z2.s, z2.s, #31
; CHECK-NEXT: and z0.s, z0.s, #0x1
@@ -72,19 +72,19 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
; CHECK-NEXT: and z2.s, z2.s, #0x1
; CHECK-NEXT: mov z1.s, p1/m, #0 // =0x0
; CHECK-NEXT: cmpne p2.s, p0/z, z0.s, #0
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ldr z0, [x0, #2, mul vl]
; CHECK-NEXT: and z3.s, z3.s, #0x1
-; CHECK-NEXT: cmpne p4.s, p0/z, z2.s, #0
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: st1w { z1.s }, p0, [x0]
+; CHECK-NEXT: str z1, [x0]
; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, #0
+; CHECK-NEXT: ldr z3, [x0, #3, mul vl]
+; CHECK-NEXT: ldr z2, [x0, #1, mul vl]
; CHECK-NEXT: mov z0.s, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z2.s, p4/m, #0 // =0x0
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
-; CHECK-NEXT: st1w { z2.s }, p0, [x0, #1, mul vl]
-; CHECK-NEXT: st1w { z3.s }, p0, [x0, #3, mul vl]
+; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0
+; CHECK-NEXT: str z0, [x0, #2, mul vl]
+; CHECK-NEXT: str z3, [x0, #3, mul vl]
+; CHECK-NEXT: str z2, [x0, #1, mul vl]
; CHECK-NEXT: .LBB1_2: // %exit
; CHECK-NEXT: ret
%broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
index a0dd040..a69808d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
@@ -701,10 +701,9 @@ define void @splat_imm_v8f64(ptr %a) vscale_range(4,0) #0 {
define void @load_splat_v8f32(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-LABEL: load_splat_v8f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: mov z0.s, s0
-; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %a
%splat = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> zeroinitializer
@@ -715,10 +714,9 @@ define void @load_splat_v8f32(ptr %a, ptr %b) vscale_range(2,2) #0 {
define void @load_splat_v4f64(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-LABEL: load_splat_v4f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: mov z0.d, d0
-; CHECK-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %a
%splat = shufflevector <4 x double> %v, <4 x double> poison, <4 x i32> zeroinitializer
@@ -729,10 +727,9 @@ define void @load_splat_v4f64(ptr %a, ptr %b) vscale_range(2,2) #0 {
define void @load_splat_v32i8(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-LABEL: load_splat_v32i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: mov z0.b, b0
-; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %a
%splat = shufflevector <32 x i8> %v, <32 x i8> poison, <32 x i32> zeroinitializer
@@ -743,10 +740,9 @@ define void @load_splat_v32i8(ptr %a, ptr %b) vscale_range(2,2) #0 {
define void @load_splat_v16i16(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-LABEL: load_splat_v16i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: mov z0.h, h0
-; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %a
%splat = shufflevector <16 x i16> %v, <16 x i16> poison, <16 x i32> zeroinitializer
@@ -757,10 +753,9 @@ define void @load_splat_v16i16(ptr %a, ptr %b) vscale_range(2,2) #0 {
define void @load_splat_v8i32(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-LABEL: load_splat_v8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: mov z0.s, s0
-; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %a
%splat = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> zeroinitializer
@@ -771,10 +766,9 @@ define void @load_splat_v8i32(ptr %a, ptr %b) vscale_range(2,2) #0 {
define void @load_splat_v4i64(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-LABEL: load_splat_v4i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: mov z0.d, d0
-; CHECK-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %a
%splat = shufflevector <4 x i64> %v, <4 x i64> poison, <4 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
index 20659cd..3c8b09f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
@@ -376,11 +376,10 @@ define <8 x i8> @negative_test_shuffle_index_size_op_both_maxhw(ptr %a, ptr %b)
define <8 x i8> @shuffle_index_size_op1_maxhw(ptr %a, ptr %b) "target-features"="+sve2" vscale_range(16,16) {
; CHECK-LABEL: shuffle_index_size_op1_maxhw:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: adrp x8, .LCPI6_0
; CHECK-NEXT: add x8, x8, :lo12:.LCPI6_0
; CHECK-NEXT: ldr d1, [x0]
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8]
+; CHECK-NEXT: ldr z0, [x8]
; CHECK-NEXT: tbl z0.b, { z1.b }, z0.b
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 1494864..dcf3317 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -374,11 +374,7 @@ define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, p
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
-; CHECK-NEXT: str z0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp]
-; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
index 8b63119..e5ab956 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
@@ -41,16 +41,16 @@ define void @test_post_ld1_int_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res_pt
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov z1.d, x8
+; CHECK-NEXT: ldr z2, [x2]
; CHECK-NEXT: ldr x8, [x0]
-; CHECK-NEXT: ptrue p2.d, vl1
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x2]
+; CHECK-NEXT: ptrue p1.d, vl1
; CHECK-NEXT: ldr x9, [x0, x1, lsl #3]
-; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT: mov z0.d, z2.d
-; CHECK-NEXT: mov z0.d, p2/m, x8
-; CHECK-NEXT: mov z2.d, p1/m, x9
+; CHECK-NEXT: mov z0.d, p1/m, x8
+; CHECK-NEXT: mov z2.d, p0/m, x9
; CHECK-NEXT: add z0.d, z0.d, z2.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x3]
+; CHECK-NEXT: str z0, [x3]
; CHECK-NEXT: ret
%A = load <4 x i64>, ptr %addr
%ld1 = load i64, ptr %data
@@ -70,15 +70,15 @@ define void @test_post_ld1_double_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: ptrue p2.d, vl1
+; CHECK-NEXT: ptrue p1.d, vl1
; CHECK-NEXT: ldr d2, [x0, x1, lsl #3]
-; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x2]
+; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: ldr z0, [x2]
; CHECK-NEXT: ldr d1, [x0]
-; CHECK-NEXT: sel z1.d, p2, z1.d, z0.d
-; CHECK-NEXT: mov z0.d, p1/m, d2
+; CHECK-NEXT: sel z1.d, p1, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, p0/m, d2
; CHECK-NEXT: fadd z0.d, z1.d, z0.d
-; CHECK-NEXT: st1d { z0.d }, p0, [x3]
+; CHECK-NEXT: str z0, [x3]
; CHECK-NEXT: ret
%A = load <4 x double>, ptr %addr
%ld1 = load double, ptr %data
diff --git a/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll b/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
index 8fa23ed..9ef8552 100644
--- a/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
@@ -86,8 +86,7 @@ define <vscale x 2 x i64> @uunpklo_i32_invalid(ptr %b) #0 {
define <vscale x 2 x i64> @uunpklo_invalid_all(ptr %b) #0 {
; CHECK-LABEL: uunpklo_invalid_all:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: uunpklo z0.d, z0.s
; CHECK-NEXT: ret
%mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
@@ -183,8 +182,7 @@ define void @uzp1_invalid_all(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_invalid_all:
; CHECK: // %bb.0:
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
%uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
index 5d5aa4b..16d26e4 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
@@ -62,15 +62,14 @@ attributes #1 = { "target-features"="+sve" vscale_range(1,1) }
define void @func_vscale2_2(ptr %a, ptr %b) #2 {
; CHECK-LABEL: func_vscale2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
+; CHECK-NEXT: ldr z2, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z3, [x1, #1, mul vl]
; CHECK-NEXT: add z0.s, z0.s, z1.s
; CHECK-NEXT: add z1.s, z2.s, z3.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
-; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: str z0, [x0]
+; CHECK-NEXT: str z1, [x0, #1, mul vl]
; CHECK-NEXT: ret
%op1 = load <16 x i32>, ptr %a
%op2 = load <16 x i32>, ptr %b
@@ -107,11 +106,10 @@ attributes #3 = { "target-features"="+sve" vscale_range(2,4) }
define void @func_vscale4_4(ptr %a, ptr %b) #4 {
; CHECK-LABEL: func_vscale4_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: add z0.s, z0.s, z1.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <16 x i32>, ptr %a
%op2 = load <16 x i32>, ptr %b
diff --git a/llvm/test/CodeGen/AArch64/sve2-bsl.ll b/llvm/test/CodeGen/AArch64/sve2-bsl.ll
index ef7d4ab..e524c5d 100644
--- a/llvm/test/CodeGen/AArch64/sve2-bsl.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-bsl.ll
@@ -93,3 +93,209 @@ define <vscale x 2 x i64> @nbsl_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
%4 = xor <vscale x 2 x i64> %3, splat(i64 -1)
ret <vscale x 2 x i64> %4
}
+
+; Test BSL/NBSL/BSL1N/BSL2N code generation for:
+; #define BSL(x,y,z) ( ((x) & (z)) | ( (y) & ~(z)))
+; #define NBSL(x,y,z) (~(((x) & (z)) | ( (y) & ~(z))))
+; #define BSL1N(x,y,z) ( (~(x) & (z)) | ( (y) & ~(z)))
+; #define BSL2N(x,y,z) ( ((x) & (z)) | (~(y) & ~(z)))
+
+define <vscale x 16 x i8> @codegen_bsl_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; CHECK-LABEL: codegen_bsl_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 16 x i8> %2, %0
+ %5 = xor <vscale x 16 x i8> %2, splat (i8 -1)
+ %6 = and <vscale x 16 x i8> %1, %5
+ %7 = or <vscale x 16 x i8> %4, %6
+ ret <vscale x 16 x i8> %7
+}
+
+define <vscale x 16 x i8> @codegen_nbsl_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; CHECK-LABEL: codegen_nbsl_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: nbsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 16 x i8> %2, %0
+ %5 = xor <vscale x 16 x i8> %2, splat (i8 -1)
+ %6 = and <vscale x 16 x i8> %1, %5
+ %7 = or <vscale x 16 x i8> %4, %6
+ %8 = xor <vscale x 16 x i8> %7, splat (i8 -1)
+ ret <vscale x 16 x i8> %8
+}
+
+define <vscale x 16 x i8> @codegen_bsl1n_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; CHECK-LABEL: codegen_bsl1n_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl1n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = xor <vscale x 16 x i8> %0, splat (i8 -1)
+ %5 = and <vscale x 16 x i8> %2, %4
+ %6 = xor <vscale x 16 x i8> %2, splat (i8 -1)
+ %7 = and <vscale x 16 x i8> %1, %6
+ %8 = or <vscale x 16 x i8> %5, %7
+ ret <vscale x 16 x i8> %8
+}
+
+define <vscale x 16 x i8> @codegen_bsl2n_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; CHECK-LABEL: codegen_bsl2n_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl2n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 16 x i8> %2, %0
+ %5 = or <vscale x 16 x i8> %2, %1
+ %6 = xor <vscale x 16 x i8> %5, splat (i8 -1)
+ %7 = or <vscale x 16 x i8> %4, %6
+ ret <vscale x 16 x i8> %7
+}
+
+define <vscale x 8 x i16> @codegen_bsl_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; CHECK-LABEL: codegen_bsl_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 8 x i16> %2, %0
+ %5 = xor <vscale x 8 x i16> %2, splat (i16 -1)
+ %6 = and <vscale x 8 x i16> %1, %5
+ %7 = or <vscale x 8 x i16> %4, %6
+ ret <vscale x 8 x i16> %7
+}
+
+define <vscale x 8 x i16> @codegen_nbsl_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; CHECK-LABEL: codegen_nbsl_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: nbsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 8 x i16> %2, %0
+ %5 = xor <vscale x 8 x i16> %2, splat (i16 -1)
+ %6 = and <vscale x 8 x i16> %1, %5
+ %7 = or <vscale x 8 x i16> %4, %6
+ %8 = xor <vscale x 8 x i16> %7, splat (i16 -1)
+ ret <vscale x 8 x i16> %8
+}
+
+define <vscale x 8 x i16> @codegen_bsl1n_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; CHECK-LABEL: codegen_bsl1n_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl1n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = xor <vscale x 8 x i16> %0, splat (i16 -1)
+ %5 = and <vscale x 8 x i16> %2, %4
+ %6 = xor <vscale x 8 x i16> %2, splat (i16 -1)
+ %7 = and <vscale x 8 x i16> %1, %6
+ %8 = or <vscale x 8 x i16> %5, %7
+ ret <vscale x 8 x i16> %8
+}
+
+define <vscale x 8 x i16> @codegen_bsl2n_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; CHECK-LABEL: codegen_bsl2n_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl2n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 8 x i16> %2, %0
+ %5 = or <vscale x 8 x i16> %2, %1
+ %6 = xor <vscale x 8 x i16> %5, splat (i16 -1)
+ %7 = or <vscale x 8 x i16> %4, %6
+ ret <vscale x 8 x i16> %7
+}
+
+define <vscale x 4 x i32> @codegen_bsl_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; CHECK-LABEL: codegen_bsl_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 4 x i32> %2, %0
+ %5 = xor <vscale x 4 x i32> %2, splat (i32 -1)
+ %6 = and <vscale x 4 x i32> %1, %5
+ %7 = or <vscale x 4 x i32> %4, %6
+ ret <vscale x 4 x i32> %7
+}
+
+define <vscale x 4 x i32> @codegen_nbsl_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; CHECK-LABEL: codegen_nbsl_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: nbsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 4 x i32> %2, %0
+ %5 = xor <vscale x 4 x i32> %2, splat (i32 -1)
+ %6 = and <vscale x 4 x i32> %1, %5
+ %7 = or <vscale x 4 x i32> %4, %6
+ %8 = xor <vscale x 4 x i32> %7, splat (i32 -1)
+ ret <vscale x 4 x i32> %8
+}
+
+define <vscale x 4 x i32> @codegen_bsl1n_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; CHECK-LABEL: codegen_bsl1n_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl1n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = xor <vscale x 4 x i32> %0, splat (i32 -1)
+ %5 = and <vscale x 4 x i32> %2, %4
+ %6 = xor <vscale x 4 x i32> %2, splat (i32 -1)
+ %7 = and <vscale x 4 x i32> %1, %6
+ %8 = or <vscale x 4 x i32> %5, %7
+ ret <vscale x 4 x i32> %8
+}
+
+define <vscale x 4 x i32> @codegen_bsl2n_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; CHECK-LABEL: codegen_bsl2n_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl2n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 4 x i32> %2, %0
+ %5 = or <vscale x 4 x i32> %2, %1
+ %6 = xor <vscale x 4 x i32> %5, splat (i32 -1)
+ %7 = or <vscale x 4 x i32> %4, %6
+ ret <vscale x 4 x i32> %7
+}
+
+define <vscale x 2 x i64> @codegen_bsl_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; CHECK-LABEL: codegen_bsl_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 2 x i64> %2, %0
+ %5 = xor <vscale x 2 x i64> %2, splat (i64 -1)
+ %6 = and <vscale x 2 x i64> %1, %5
+ %7 = or <vscale x 2 x i64> %4, %6
+ ret <vscale x 2 x i64> %7
+}
+
+define <vscale x 2 x i64> @codegen_nbsl_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; CHECK-LABEL: codegen_nbsl_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: nbsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 2 x i64> %2, %0
+ %5 = xor <vscale x 2 x i64> %2, splat (i64 -1)
+ %6 = and <vscale x 2 x i64> %1, %5
+ %7 = or <vscale x 2 x i64> %4, %6
+ %8 = xor <vscale x 2 x i64> %7, splat (i64 -1)
+ ret <vscale x 2 x i64> %8
+}
+
+define <vscale x 2 x i64> @codegen_bsl1n_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; CHECK-LABEL: codegen_bsl1n_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl1n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = xor <vscale x 2 x i64> %0, splat (i64 -1)
+ %5 = and <vscale x 2 x i64> %2, %4
+ %6 = xor <vscale x 2 x i64> %2, splat (i64 -1)
+ %7 = and <vscale x 2 x i64> %1, %6
+ %8 = or <vscale x 2 x i64> %5, %7
+ ret <vscale x 2 x i64> %8
+}
+
+define <vscale x 2 x i64> @codegen_bsl2n_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; CHECK-LABEL: codegen_bsl2n_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bsl2n z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %4 = and <vscale x 2 x i64> %2, %0
+ %5 = or <vscale x 2 x i64> %2, %1
+ %6 = xor <vscale x 2 x i64> %5, splat (i64 -1)
+ %7 = or <vscale x 2 x i64> %4, %6
+ ret <vscale x 2 x i64> %7
+}
diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index 14a578f..b0b3198 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -207,13 +207,11 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr b0, [x0]
; CHECK-GI-NEXT: ldr b1, [x1]
+; CHECK-GI-NEXT: add x8, x0, #1
+; CHECK-GI-NEXT: add x9, x1, #1
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: ld1 { v1.b }[1], [x9]
; CHECK-GI-NEXT: add x8, x2, #1
-; CHECK-GI-NEXT: ldr b2, [x0, #1]
-; CHECK-GI-NEXT: ldr b3, [x1, #1]
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v1.b[0], v1.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
; CHECK-GI-NEXT: uqadd v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x2]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index ddb3332..54754e7 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -206,13 +206,11 @@ define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr b0, [x0]
; CHECK-GI-NEXT: ldr b1, [x1]
+; CHECK-GI-NEXT: add x8, x0, #1
+; CHECK-GI-NEXT: add x9, x1, #1
+; CHECK-GI-NEXT: ld1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: ld1 { v1.b }[1], [x9]
; CHECK-GI-NEXT: add x8, x2, #1
-; CHECK-GI-NEXT: ldr b2, [x0, #1]
-; CHECK-GI-NEXT: ldr b3, [x1, #1]
-; CHECK-GI-NEXT: mov v0.b[0], v0.b[0]
-; CHECK-GI-NEXT: mov v1.b[0], v1.b[0]
-; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
; CHECK-GI-NEXT: uqsub v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: st1 { v0.b }[0], [x2]
; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
index b3865ee..5733cf9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
@@ -625,7 +625,7 @@ define amdgpu_ps <4 x float> @image_bvh64_intersect_ray_a16_vgpr_descr(i64 %node
ret <4 x float> %r
}
-define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> %tdescr) {
; GFX1030-LABEL: image_bvh_intersect_ray_nsa_reassign:
; GFX1030: ; %bb.0:
; GFX1030-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
@@ -740,7 +740,7 @@ define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr,
ret void
}
-define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> %tdescr) {
; GFX1030-LABEL: image_bvh_intersect_ray_a16_nsa_reassign:
; GFX1030: ; %bb.0:
; GFX1030-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
@@ -845,7 +845,7 @@ define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_
ret void
}
-define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4 x i32> %tdescr) {
; GFX1030-LABEL: image_bvh64_intersect_ray_nsa_reassign:
; GFX1030: ; %bb.0:
; GFX1030-NEXT: s_clause 0x1
@@ -956,7 +956,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4
ret void
}
-define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray, <4 x i32> %tdescr) {
; GFX1030-LABEL: image_bvh64_intersect_ray_a16_nsa_reassign:
; GFX1030: ; %bb.0:
; GFX1030-NEXT: s_clause 0x1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.atomic.fadd-with-ret.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.atomic.fadd-with-ret.ll
index b46a827..dadd971 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.atomic.fadd-with-ret.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.atomic.fadd-with-ret.ll
@@ -8,7 +8,7 @@ declare <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half>, ptr
; GFX90A-LABEL: {{^}}buffer_atomic_add_f32_rtn:
; GFX90A: buffer_atomic_add_f32 v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9:]+}}], s{{[0-9]+}} offen glc
-define amdgpu_kernel void @buffer_atomic_add_f32_rtn(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 %soffset) {
+define amdgpu_kernel void @buffer_atomic_add_f32_rtn(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) {
main_body:
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
store float %ret, ptr poison
@@ -17,7 +17,7 @@ main_body:
; GFX90A-LABEL: {{^}}buffer_atomic_add_v2f16_rtn:
; GFX90A: buffer_atomic_pk_add_f16 v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9:]+}}], s{{[0-9]+}} offen glc
-define amdgpu_kernel void @buffer_atomic_add_v2f16_rtn(<2 x half> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
+define amdgpu_kernel void @buffer_atomic_add_v2f16_rtn(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) {
main_body:
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
store <2 x half> %ret, ptr poison
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
index 1ef7d35..8ae7b58 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <18 x float> @bitcast_v18i32_to_v18f32(<18 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v18i32_to_v18f32:
@@ -1227,113 +1228,145 @@ define <36 x i16> @bitcast_v18i32_to_v36i16(<18 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v18i32_to_v36i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18i32_to_v36i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18i32_to_v36i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1963,73 +1996,105 @@ define <18 x i32> @bitcast_v36i16_to_v18i32(<36 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v18i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v18i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v18i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2610,113 +2675,145 @@ define <36 x half> @bitcast_v18i32_to_v36f16(<18 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v18i32_to_v36f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18i32_to_v36f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18i32_to_v36f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3423,73 +3520,105 @@ define <18 x i32> @bitcast_v36f16_to_v18i32(<36 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v18i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v18i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v18i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4436,104 +4565,127 @@ define <36 x i16> @bitcast_v18f32_to_v36i16(<18 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v18f32_to_v36i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18f32_to_v36i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18f32_to_v36i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5163,73 +5315,105 @@ define <18 x float> @bitcast_v36i16_to_v18f32(<36 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v18f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v18f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v18f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5810,104 +5994,127 @@ define <36 x half> @bitcast_v18f32_to_v36f16(<18 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v18f32_to_v36f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18f32_to_v36f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18f32_to_v36f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6614,73 +6821,105 @@ define <18 x float> @bitcast_v36f16_to_v18f32(<36 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v18f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v18f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v18f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7361,118 +7600,155 @@ define <36 x i16> @bitcast_v9i64_to_v36i16(<9 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v9i64_to_v36i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9i64_to_v36i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9i64_to_v36i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8102,73 +8378,105 @@ define <9 x i64> @bitcast_v36i16_to_v9i64(<36 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v9i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v9i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v9i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8749,118 +9057,155 @@ define <36 x half> @bitcast_v9i64_to_v36f16(<9 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v9i64_to_v36f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9i64_to_v36f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9i64_to_v36f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9567,73 +9912,105 @@ define <9 x i64> @bitcast_v36f16_to_v9i64(<36 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v9i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v9i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v9i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10034,104 +10411,127 @@ define <36 x i16> @bitcast_v9f64_to_v36i16(<9 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v9f64_to_v36i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9f64_to_v36i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9f64_to_v36i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10761,73 +11161,105 @@ define <9 x double> @bitcast_v36i16_to_v9f64(<36 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v9f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v9f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v9f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11372,104 +11804,127 @@ define <36 x half> @bitcast_v9f64_to_v36f16(<9 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v18, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v9f64_to_v36f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: ; implicit-def: $vgpr19
-; GFX11-NEXT: ; implicit-def: $vgpr18
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9f64_to_v36f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9f64_to_v36f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v18
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr19
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr18
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v35, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v26, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v25, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v24, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v23, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v22, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v21, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v20, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v19, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v18, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12176,73 +12631,105 @@ define <9 x double> @bitcast_v36f16_to_v9f64(<36 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v9f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v9f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v9f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v32, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v33, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v34, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v35, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v31, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v30, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v29, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v28, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v27, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v26, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v25, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v24, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v23, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v22, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v21, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v20, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v19, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12883,109 +13370,141 @@ define <36 x half> @bitcast_v36i16_to_v36f16(<36 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v35, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v36f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v36f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v36f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13530,109 +14049,141 @@ define <36 x i16> @bitcast_v36f16_to_v36i16(<36 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v17, v35, v17, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v36i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v18
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v36i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v36i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v18
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v19, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v20, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v21, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v23, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v24, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v25, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v26, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v27, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v28, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v30, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v32, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v34, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v36, v17, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index 8f9de9e..67e035b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <20 x float> @bitcast_v20i32_to_v20f32(<20 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v20i32_to_v20f32:
@@ -1310,123 +1311,157 @@ define <40 x i16> @bitcast_v20i32_to_v40i16(<20 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v20i32_to_v40i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v20i32_to_v40i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v20i32_to_v40i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2160,79 +2195,113 @@ define <20 x i32> @bitcast_v40i16_to_v20i32(<40 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40i16_to_v20i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2879,123 +2948,157 @@ define <40 x half> @bitcast_v20i32_to_v40f16(<20 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v20i32_to_v40f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v20i32_to_v40f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v20i32_to_v40f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3814,79 +3917,113 @@ define <20 x i32> @bitcast_v40f16_to_v20i32(<40 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40f16_to_v20i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4899,113 +5036,137 @@ define <40 x i16> @bitcast_v20f32_to_v40i16(<20 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v20f32_to_v40i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v20f32_to_v40i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v20f32_to_v40i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5739,79 +5900,113 @@ define <20 x float> @bitcast_v40i16_to_v20f32(<40 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40i16_to_v20f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6458,113 +6653,137 @@ define <40 x half> @bitcast_v20f32_to_v40f16(<20 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v20f32_to_v40f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v20f32_to_v40f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v20f32_to_v40f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7383,79 +7602,113 @@ define <20 x float> @bitcast_v40f16_to_v20f32(<40 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40f16_to_v20f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8188,128 +8441,167 @@ define <40 x i16> @bitcast_v10i64_to_v40i16(<10 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v10i64_to_v40i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v10i64_to_v40i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v10i64_to_v40i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9043,79 +9335,113 @@ define <10 x i64> @bitcast_v40i16_to_v10i64(<40 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40i16_to_v10i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9762,128 +10088,167 @@ define <40 x half> @bitcast_v10i64_to_v40f16(<10 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v10i64_to_v40f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v10i64_to_v40f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v10i64_to_v40f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10702,79 +11067,113 @@ define <10 x i64> @bitcast_v40f16_to_v10i64(<40 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40f16_to_v10i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11212,113 +11611,137 @@ define <40 x i16> @bitcast_v10f64_to_v40i16(<10 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v10f64_to_v40i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v10f64_to_v40i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v10f64_to_v40i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12052,79 +12475,113 @@ define <10 x double> @bitcast_v40i16_to_v10f64(<40 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40i16_to_v10f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12731,113 +13188,137 @@ define <40 x half> @bitcast_v10f64_to_v40f16(<10 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v20, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v10f64_to_v40f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr20
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v10f64_to_v40f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v10f64_to_v40f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v39, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v37, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v36, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v29, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v28, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v27, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v26, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v25, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v24, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v23, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v22, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v21, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v20, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13656,79 +14137,113 @@ define <10 x double> @bitcast_v40f16_to_v10f64(<40 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40f16_to_v10f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
-; GFX11-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v20
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v37, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v38, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v39, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v36, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v35, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v34, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v33, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v32, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v31, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v30, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v29, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v28, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v27, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v26, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v25, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v24, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v23, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v22, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v21, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14482,119 +14997,153 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v39, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40i16_to_v40f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v20
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v40f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v40f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15206,119 +15755,153 @@ define <40 x i16> @bitcast_v40f16_to_v40i16(<40 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v19, v39, v19, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v40f16_to_v40i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v20
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v40i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v40i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v20
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v21, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v22, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v23, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v24, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v25, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v26, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v27, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v28, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v29, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v30, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v31, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v33, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v35, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v37, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v39, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index a0fe407..08590a3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <22 x float> @bitcast_v22i32_to_v22f32(<22 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v22i32_to_v22f32:
@@ -1394,133 +1395,169 @@ define <44 x i16> @bitcast_v22i32_to_v44i16(<22 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v22i32_to_v44i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v22i32_to_v44i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v22i32_to_v44i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2357,85 +2394,121 @@ define <22 x i32> @bitcast_v44i16_to_v22i32(<44 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44i16_to_v22i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3167,133 +3240,169 @@ define <44 x half> @bitcast_v22i32_to_v44f16(<22 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v22i32_to_v44f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v22i32_to_v44f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v22i32_to_v44f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4224,85 +4333,121 @@ define <22 x i32> @bitcast_v44f16_to_v22i32(<44 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44f16_to_v22i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5382,122 +5527,147 @@ define <44 x i16> @bitcast_v22f32_to_v44i16(<22 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v22f32_to_v44i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v22f32_to_v44i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v22f32_to_v44i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6334,85 +6504,121 @@ define <22 x float> @bitcast_v44i16_to_v22f32(<44 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44i16_to_v22f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7144,122 +7350,147 @@ define <44 x half> @bitcast_v22f32_to_v44f16(<22 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v22f32_to_v44f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v22f32_to_v44f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v22f32_to_v44f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8190,85 +8421,121 @@ define <22 x float> @bitcast_v44f16_to_v22f32(<44 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44f16_to_v22f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9054,139 +9321,181 @@ define <44 x i16> @bitcast_v11i64_to_v44i16(<11 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v11i64_to_v44i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v11i64_to_v44i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v11i64_to_v44i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10023,85 +10332,121 @@ define <11 x i64> @bitcast_v44i16_to_v11i64(<44 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44i16_to_v11i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10833,139 +11178,181 @@ define <44 x half> @bitcast_v11i64_to_v44f16(<11 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v11i64_to_v44f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v11i64_to_v44f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v11i64_to_v44f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11896,85 +12283,121 @@ define <11 x i64> @bitcast_v44f16_to_v11i64(<44 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44f16_to_v11i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12449,122 +12872,147 @@ define <44 x i16> @bitcast_v11f64_to_v44i16(<11 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v11f64_to_v44i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v11f64_to_v44i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v11f64_to_v44i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13401,85 +13849,121 @@ define <11 x double> @bitcast_v44i16_to_v11f64(<44 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44i16_to_v11f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14167,122 +14651,147 @@ define <44 x half> @bitcast_v11f64_to_v44f16(<11 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v22, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v11f64_to_v44f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v11f64_to_v44f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v11f64_to_v44f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v51, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v49, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v48, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v39, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v38, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v32, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v31, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v30, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v29, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v28, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v27, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v26, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v25, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v24, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v23, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v22, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15213,85 +15722,121 @@ define <11 x double> @bitcast_v44f16_to_v11f64(<44 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44f16_to_v11f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v49, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v50, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v51, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v48, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v39, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v38, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v37, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v36, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v35, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v34, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v33, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v32, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v31, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v30, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v29, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v28, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v27, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v26, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v25, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v24, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v23, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16159,129 +16704,165 @@ define <44 x half> @bitcast_v44i16_to_v44f16(<44 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v51, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44i16_to_v44f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v22
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v21
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v44f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v44f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v21
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16947,129 +17528,165 @@ define <44 x i16> @bitcast_v44f16_to_v44i16(<44 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v51, v21, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v44f16_to_v44i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v22
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v21
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v44i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v44i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v22
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v21
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v23, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v24, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v26, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v27, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v28, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v29, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v30, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v31, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v32, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v33, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v34, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v36, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v38, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v48, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v50, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v51, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v52, v21, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index 87fa5af..b1a194f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <24 x float> @bitcast_v24i32_to_v24f32(<24 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v24i32_to_v24f32:
@@ -1508,143 +1509,181 @@ define <48 x i16> @bitcast_v24i32_to_v48i16(<24 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v24i32_to_v48i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v24i32_to_v48i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v24i32_to_v48i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2579,91 +2618,129 @@ define <24 x i32> @bitcast_v48i16_to_v24i32(<48 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48i16_to_v24i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3505,143 +3582,181 @@ define <48 x half> @bitcast_v24i32_to_v48f16(<24 x i32> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v24i32_to_v48f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v24i32_to_v48f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v24i32_to_v48f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4696,91 +4811,129 @@ define <24 x i32> @bitcast_v48f16_to_v24i32(<48 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48f16_to_v24i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5957,131 +6110,157 @@ define <48 x i16> @bitcast_v24f32_to_v48i16(<24 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v24f32_to_v48i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v24f32_to_v48i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v24f32_to_v48i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7016,91 +7195,129 @@ define <24 x float> @bitcast_v48i16_to_v24f32(<48 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48i16_to_v24f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7942,131 +8159,157 @@ define <48 x half> @bitcast_v24f32_to_v48f16(<24 x float> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v24f32_to_v48f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v24f32_to_v48f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v24f32_to_v48f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9121,91 +9364,129 @@ define <24 x float> @bitcast_v48f16_to_v24f32(<48 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48f16_to_v24f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10074,149 +10355,193 @@ define <48 x i16> @bitcast_v12i64_to_v48i16(<12 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v12i64_to_v48i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v12i64_to_v48i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v12i64_to_v48i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11151,91 +11476,129 @@ define <12 x i64> @bitcast_v48i16_to_v12i64(<48 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48i16_to_v12i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12077,149 +12440,193 @@ define <48 x half> @bitcast_v12i64_to_v48f16(<12 x i64> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v12i64_to_v48f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v12i64_to_v48f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v12i64_to_v48f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13274,91 +13681,129 @@ define <12 x i64> @bitcast_v48f16_to_v12i64(<48 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48f16_to_v12i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13901,131 +14346,157 @@ define <48 x i16> @bitcast_v12f64_to_v48i16(<12 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v12f64_to_v48i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v12f64_to_v48i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v12f64_to_v48i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14960,91 +15431,129 @@ define <12 x double> @bitcast_v48i16_to_v12f64(<48 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48i16_to_v12f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15838,131 +16347,157 @@ define <48 x half> @bitcast_v12f64_to_v48f16(<12 x double> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v24, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v12f64_to_v48f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v12f64_to_v48f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v12f64_to_v48f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v55, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v53, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v52, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v51, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v50, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v49, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v48, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v35, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v34, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v33, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v32, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v31, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v30, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v29, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v28, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v27, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v26, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v25, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v24, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17017,91 +17552,129 @@ define <12 x double> @bitcast_v48f16_to_v12f64(<48 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48f16_to_v12f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
-; GFX11-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v24
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v53, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v54, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v55, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v52, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v51, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v50, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v49, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v48, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v39, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v38, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v37, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v36, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v35, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v34, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v33, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v32, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v31, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v30, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v29, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v28, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v27, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v26, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v25, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18080,139 +18653,177 @@ define <48 x half> @bitcast_v48i16_to_v48f16(<48 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v55, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48i16_to_v48f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v24
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v23
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v48f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v48f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v23
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18944,139 +19555,177 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v23, v55, v23, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v48f16_to_v48i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v24
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v23
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v48i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v48i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v24
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v23
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v25, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v26, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v27, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v28, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v29, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v30, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v31, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v32, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v33, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v34, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v35, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v36, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v37, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v39, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v49, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v51, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v52, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v53, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v54, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v55, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v64, v23, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index e4f8a96..75baa36 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <26 x float> @bitcast_v26i32_to_v26f32(<26 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v26i32_to_v26f32:
@@ -1610,153 +1611,193 @@ define <52 x i16> @bitcast_v26i32_to_v52i16(<26 x i32> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v26i32_to_v52i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v26i32_to_v52i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v26i32_to_v52i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2792,97 +2833,137 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52i16_to_v26i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3823,153 +3904,193 @@ define <52 x half> @bitcast_v26i32_to_v52f16(<26 x i32> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v26i32_to_v52f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v26i32_to_v52f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v26i32_to_v52f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5169,97 +5290,137 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52f16_to_v26i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6521,140 +6682,167 @@ define <52 x i16> @bitcast_v26f32_to_v52i16(<26 x float> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v26f32_to_v52i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v26f32_to_v52i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v26f32_to_v52i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7690,97 +7878,137 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52i16_to_v26f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8721,140 +8949,167 @@ define <52 x half> @bitcast_v26f32_to_v52f16(<26 x float> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v26f32_to_v52f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v26f32_to_v52f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v26f32_to_v52f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10054,97 +10309,137 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52f16_to_v26f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11084,160 +11379,207 @@ define <52 x i16> @bitcast_v13i64_to_v52i16(<13 x i64> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v13i64_to_v52i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v13i64_to_v52i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v13i64_to_v52i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12273,97 +12615,137 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52i16_to_v13i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13304,160 +13686,207 @@ define <52 x half> @bitcast_v13i64_to_v52f16(<13 x i64> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v13i64_to_v52f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v13i64_to_v52f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v13i64_to_v52f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14657,97 +15086,137 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52f16_to_v13i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15345,140 +15814,167 @@ define <52 x i16> @bitcast_v13f64_to_v52i16(<13 x double> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v13f64_to_v52i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v13f64_to_v52i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v13f64_to_v52i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16514,97 +17010,137 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52i16_to_v13f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17493,140 +18029,167 @@ define <52 x half> @bitcast_v13f64_to_v52f16(<13 x double> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v13f64_to_v52f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v13f64_to_v52f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v13f64_to_v52f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v67, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v65, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v64, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v55, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v54, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v53, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v52, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v51, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v50, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v38, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v37, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v36, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v35, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v34, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v33, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v32, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v31, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v30, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v29, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v28, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v27, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v26, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18826,97 +19389,137 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52f16_to_v13f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
-; GFX11-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v26
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v65, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v66, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v67, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v64, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v55, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v54, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v53, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v52, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v51, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v50, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v49, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v48, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v39, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v38, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v37, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v36, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v35, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v34, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v33, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v32, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v31, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v30, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v29, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v28, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v27, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20030,149 +20633,189 @@ define <52 x half> @bitcast_v52i16_to_v52f16(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52i16_to_v52f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v26
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v25
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v52f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v52f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v25
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20999,149 +21642,189 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v52f16_to_v52i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v26
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v25
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v52i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v52i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v26
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v25
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v27, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v28, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v29, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v30, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v31, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v32, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v33, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v34, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v35, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v36, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v37, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v38, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v39, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v48, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v50, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v52, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v53, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v54, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v55, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v64, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v65, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v66, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v67, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v68, v25, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index d1531b3..cdbe26b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <28 x float> @bitcast_v28i32_to_v28f32(<28 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v28i32_to_v28f32:
@@ -1716,163 +1717,205 @@ define <56 x i16> @bitcast_v28i32_to_v56i16(<28 x i32> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v28i32_to_v56i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v28i32_to_v56i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v28i32_to_v56i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3010,103 +3053,145 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56i16_to_v28i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4127,163 +4212,205 @@ define <56 x half> @bitcast_v28i32_to_v56f16(<28 x i32> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v28i32_to_v56f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v28i32_to_v56f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v28i32_to_v56f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5604,103 +5731,145 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56f16_to_v28i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7051,149 +7220,177 @@ define <56 x i16> @bitcast_v28f32_to_v56i16(<28 x float> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v28f32_to_v56i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v28f32_to_v56i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v28f32_to_v56i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8331,103 +8528,145 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56i16_to_v28f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9448,149 +9687,177 @@ define <56 x half> @bitcast_v28f32_to_v56f16(<28 x float> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v28f32_to_v56f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v28f32_to_v56f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v28f32_to_v56f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10911,103 +11178,145 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56f16_to_v28f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12022,170 +12331,219 @@ define <56 x i16> @bitcast_v14i64_to_v56i16(<14 x i64> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v14i64_to_v56i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v14i64_to_v56i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v14i64_to_v56i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13323,103 +13681,145 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56i16_to_v14i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14440,170 +14840,219 @@ define <56 x half> @bitcast_v14i64_to_v56f16(<14 x i64> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v14i64_to_v56f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v14i64_to_v56f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v14i64_to_v56f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15924,103 +16373,145 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56f16_to_v14i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16675,149 +17166,177 @@ define <56 x i16> @bitcast_v14f64_to_v56i16(<14 x double> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v14f64_to_v56i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v14f64_to_v56i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v14f64_to_v56i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17955,103 +18474,145 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56i16_to_v14f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19016,149 +19577,177 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v14f64_to_v56f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v14f64_to_v56f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v14f64_to_v56f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v71, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v69, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v68, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v67, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v66, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v65, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v64, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v55, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v54, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v53, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v52, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v49, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v48, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v39, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v38, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v37, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v36, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v35, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v34, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v33, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v32, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v31, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v30, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v29, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v28, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20479,103 +21068,145 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56f16_to_v14f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
-; GFX11-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v28
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v69, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v70, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v71, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v68, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v67, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v66, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v65, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v64, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v55, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v54, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v53, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v52, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v51, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v50, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v49, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v48, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v39, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v38, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v37, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v36, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v35, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v34, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v33, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v32, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v31, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v30, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v29, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21817,159 +22448,201 @@ define <56 x half> @bitcast_v56i16_to_v56f16(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56i16_to_v56f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v28
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v27
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v56f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v56f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v27
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22938,159 +23611,201 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v56f16_to_v56i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v28
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v27
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v56i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v56i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v28
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v27
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v29, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v30, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v31, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v32, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v33, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v34, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v35, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v36, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v37, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v38, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v39, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v48, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v49, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v50, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v51, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v53, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v54, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v55, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v64, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v65, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v66, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v67, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v68, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v69, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v70, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v71, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v80, v27, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index b60649c..2837f2b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
define <30 x float> @bitcast_v30i32_to_v30f32(<30 x i32> %a, i32 %b) {
; GCN-LABEL: bitcast_v30i32_to_v30f32:
@@ -1820,173 +1821,217 @@ define <60 x i16> @bitcast_v30i32_to_v60i16(<30 x i32> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v30i32_to_v60i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB6_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB6_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 3, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB6_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v30i32_to_v60i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 3, v29
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB6_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v30i32_to_v60i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB6_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v29, 3, v29
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 3, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB6_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3218,109 +3263,153 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60i16_to_v30i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB7_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB7_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB7_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB7_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4470,173 +4559,217 @@ define <60 x half> @bitcast_v30i32_to_v60f16(<30 x i32> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v30i32_to_v60f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB8_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB8_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 3, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 3, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 3, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 3, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB8_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v30i32_to_v60f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 3, v29
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: .LBB8_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v30i32_to_v60f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v29, 3, v29
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 3, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 3, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 3, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, 3, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 3, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 3, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 3, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 3, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 3, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 3, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 3, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 3, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 3, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 3, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 3, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 3, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 3, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB8_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6060,109 +6193,153 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60f16_to_v30i32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB9_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB9_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30i32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30i32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7600,158 +7777,187 @@ define <60 x i16> @bitcast_v30f32_to_v60i16(<30 x float> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v30f32_to_v60i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB14_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB14_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB14_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v30f32_to_v60i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v30f32_to_v60i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB14_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8983,109 +9189,153 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60i16_to_v30f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB15_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB15_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB15_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10235,158 +10485,187 @@ define <60 x half> @bitcast_v30f32_to_v60f16(<30 x float> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v30f32_to_v60f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB16_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB16_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB16_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v30f32_to_v60f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: .LBB16_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v30f32_to_v60f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB16_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB16_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11810,109 +12089,153 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60f16_to_v30f32:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB17_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB17_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30f32:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB17_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30f32:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB17_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB17_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13000,181 +13323,233 @@ define <60 x i16> @bitcast_v15i64_to_v60i16(<15 x i64> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v15i64_to_v60i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB20_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB20_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB20_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v15i64_to_v60i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB20_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v15i64_to_v60i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB20_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB20_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14406,109 +14781,153 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60i16_to_v15i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB21_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB21_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB21_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB21_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB21_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15658,181 +16077,233 @@ define <60 x half> @bitcast_v15i64_to_v60f16(<15 x i64> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v15i64_to_v60f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB22_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB22_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB22_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v15i64_to_v60f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: .LBB22_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v15i64_to_v60f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB22_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v12, vcc_lo, v12, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v10, vcc_lo, v10, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v8, vcc_lo, v8, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v9, null, 0, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v6, vcc_lo, v6, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, v4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v2, vcc_lo, v2, 3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB22_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17256,109 +17727,153 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60f16_to_v15i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB23_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB23_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15i64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15i64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB23_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18074,158 +18589,187 @@ define <60 x i16> @bitcast_v15f64_to_v60i16(<15 x double> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v15f64_to_v60i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB24_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB24_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB24_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v15f64_to_v60i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB24_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v15f64_to_v60i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB24_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB24_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19457,109 +20001,153 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60i16_to_v15f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB25_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: .LBB25_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB25_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB25_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: .LBB25_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20639,158 +21227,187 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v15f64_to_v60f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB26_2: ; %Flow
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB26_4
-; GFX11-NEXT: ; %bb.3: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v0
-; GFX11-NEXT: .LBB26_4: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v15f64_to_v60f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: .LBB26_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v15f64_to_v60f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB26_4
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-FAKE16-NEXT: .LBB26_4: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v83, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v81, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v80, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v71, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v70, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v69, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v68, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v67, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v66, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v65, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v64, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v55, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v54, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v52, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v51, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v50, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v49, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v48, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v39, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v38, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v37, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v36, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v35, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v34, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v33, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v32, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v31, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v30, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22214,109 +22831,153 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60f16_to_v15f64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v3
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
-; GFX11-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
-; GFX11-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB27_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
-; GFX11-NEXT: .LBB27_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15f64:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15f64:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v3
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v30
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v81, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v82, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v83, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v84, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v80, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v71, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v70, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v69, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v68, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v67, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v66, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v65, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v64, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v55, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v54, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v53, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v52, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v51, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v50, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v49, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v48, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v39, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v38, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v37, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v36, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v35, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v34, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v33, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v32, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v31, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB27_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23682,169 +24343,213 @@ define <60 x half> @bitcast_v60i16_to_v60f16(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60i16_to_v60f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v30
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB28_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v29
-; GFX11-NEXT: .LBB28_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v60f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: .LBB28_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v60f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB28_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v29
+; GFX11-FAKE16-NEXT: .LBB28_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24848,169 +25553,213 @@ define <60 x i16> @bitcast_v60f16_to_v60i16(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v60f16_to_v60i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, exec_lo
-; GFX11-NEXT: v_cmpx_ne_u32_e32 0, v30
-; GFX11-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX11-NEXT: s_cbranch_execz .LBB29_2
-; GFX11-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v29
-; GFX11-NEXT: .LBB29_2: ; %end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
-; GFX11-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
-; GFX11-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
-; GFX11-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
-; GFX11-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
-; GFX11-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
-; GFX11-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
-; GFX11-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
-; GFX11-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
-; GFX11-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
-; GFX11-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
-; GFX11-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
-; GFX11-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
-; GFX11-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
-; GFX11-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
-; GFX11-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
-; GFX11-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
-; GFX11-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
-; GFX11-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
-; GFX11-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
-; GFX11-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v60i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v60i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, exec_lo
+; GFX11-FAKE16-NEXT: v_cmpx_ne_u32_e32 0, v30
+; GFX11-FAKE16-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB29_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v29
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v31, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v32, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v33, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v34, v3, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v35, v4, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v36, v5, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v37, v6, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v7, v38, v7, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v8, v39, v8, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v9, v48, v9, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v10, v49, v10, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v11, v50, v11, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v12, v51, v12, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v13, v52, v13, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v14, v53, v14, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v15, v54, v15, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v16, v55, v16, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v17, v64, v17, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v18, v65, v18, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v19, v66, v19, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v20, v67, v20, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v21, v68, v21, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v22, v69, v22, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v23, v70, v23, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v24, v71, v24, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v25, v80, v25, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v26, v81, v26, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v27, v82, v27, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v28, v83, v28, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v29, v84, v29, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/ashr.v2i16.ll b/llvm/test/CodeGen/AMDGPU/ashr.v2i16.ll
index f2794ff..155042c 100644
--- a/llvm/test/CodeGen/AMDGPU/ashr.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/ashr.v2i16.ll
@@ -1,50 +1,172 @@
-; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
-; RUN: llc -mtriple=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
-
-; FIXME: Should be same on CI/VI
-; GCN-LABEL: {{^}}s_ashr_v2i16:
-; GFX9: s_load_dword [[LHS:s[0-9]+]]
-; GFX9: s_load_dword [[RHS:s[0-9]+]]
-; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
-; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]
-
-; CIVI: s_load_dword [[LHS:s[0-9]+]]
-; CIVI: s_load_dword [[RHS:s[0-9]+]]
-
-; CIVI-DAG: s_ashr_i32
-; CIVI-DAG: s_ashr_i32
-; CIVI-DAG: s_sext_i32_i16
-; CIVI-DAG: s_sext_i32_i16
-; CIVI-DAG: s_ashr_i32
-; CIVI-DAG: s_ashr_i32
-; CIVI-DAG: s_lshl_b32
-; CIVI: s_and_b32
-; CIVI: s_or_b32
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=CI %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GFX10 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GFX11 %s
define amdgpu_kernel void @s_ashr_v2i16(ptr addrspace(1) %out, i32, <2 x i16> %lhs, i32, <2 x i16> %rhs) #0 {
+; GFX9-LABEL: s_ashr_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dword s6, s[4:5], 0x30
+; GFX9-NEXT: s_load_dword s7, s[4:5], 0x38
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: v_pk_ashrrev_i16 v0, s7, v0
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: s_ashr_v2i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dword s6, s[4:5], 0x30
+; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x38
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_ashr_i32 s5, s6, 16
+; VI-NEXT: s_sext_i32_i16 s6, s6
+; VI-NEXT: s_ashr_i32 s7, s4, 16
+; VI-NEXT: s_sext_i32_i16 s4, s4
+; VI-NEXT: s_ashr_i32 s5, s5, s7
+; VI-NEXT: s_ashr_i32 s4, s6, s4
+; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: s_ashr_v2i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dword s6, s[4:5], 0xc
+; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; CI-NEXT: s_load_dword s4, s[4:5], 0xe
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_ashr_i32 s5, s6, 16
+; CI-NEXT: s_sext_i32_i16 s6, s6
+; CI-NEXT: s_lshr_b32 s7, s4, 16
+; CI-NEXT: s_ashr_i32 s5, s5, s7
+; CI-NEXT: s_ashr_i32 s4, s6, s4
+; CI-NEXT: s_lshl_b32 s5, s5, 16
+; CI-NEXT: s_and_b32 s4, s4, 0xffff
+; CI-NEXT: s_or_b32 s4, s4, s5
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_ashr_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_clause 0x2
+; GFX10-NEXT: s_load_dword s2, s[4:5], 0x30
+; GFX10-NEXT: s_load_dword s3, s[4:5], 0x38
+; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v0, s3, s2
+; GFX10-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-NEXT: s_mov_b32 s2, -1
+; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_ashr_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x30
+; GFX11-NEXT: s_load_b32 s3, s[4:5], 0x38
+; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v0, s3, s2
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0
+; GFX11-NEXT: s_endpgm
%result = ashr <2 x i16> %lhs, %rhs
store <2 x i16> %result, ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_ashr_v2i16:
-; GCN: {{buffer|flat|global}}_load_dwordx2 v[[[LHS:[0-9]+]]:[[RHS:[0-9]+]]]
-; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], v[[RHS]], v[[LHS]]
-
-; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-
-; CI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
-; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, v[[LHS]]
-; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
-; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
-; CI: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
-; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_ashr_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GFX9-LABEL: v_ashr_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v0, v1, v0
+; GFX9-NEXT: global_store_dword v2, v0, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: v_ashr_v2i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_e32 v4, v1, v0
+; VI-NEXT: v_ashrrev_i16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_or_b32_e32 v0, v4, v0
+; VI-NEXT: flat_store_dword v[2:3], v0
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: v_ashr_v2i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_bfe_i32 v4, v2, 0, 16
+; CI-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; CI-NEXT: v_ashrrev_i32_e32 v2, v5, v2
+; CI-NEXT: v_ashrrev_i32_e32 v3, v3, v4
+; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; CI-NEXT: v_or_b32_e32 v2, v3, v2
+; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: v_ashr_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v0, v1, v0
+; GFX10-NEXT: global_store_dword v2, v0, s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: v_ashr_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[0:1], v2, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v0, v1, v0
+; GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
@@ -57,11 +179,90 @@ define amdgpu_kernel void @v_ashr_v2i16(ptr addrspace(1) %out, ptr addrspace(1)
ret void
}
-; GCN-LABEL: {{^}}ashr_v_s_v2i16:
-; GFX9: s_load_dword [[RHS:s[0-9]+]]
-; GFX9: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
-; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @ashr_v_s_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in, <2 x i16> %sgpr) #0 {
+; GFX9-LABEL: ashr_v_s_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dword s6, s[4:5], 0x34
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v1, s6, v1
+; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: ashr_v_s_v2i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_lshr_b32 s1, s4, 16
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_e32 v4, s4, v3
+; VI-NEXT: v_ashrrev_i16_sdwa v2, v2, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: ashr_v_s_v2i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_load_dword s8, s[4:5], 0xd
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; CI-NEXT: s_lshr_b32 s4, s8, 16
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_bfe_i32 v3, v2, 0, 16
+; CI-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; CI-NEXT: v_ashrrev_i32_e32 v2, s4, v2
+; CI-NEXT: v_ashrrev_i32_e32 v3, s8, v3
+; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; CI-NEXT: v_or_b32_e32 v2, v3, v2
+; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ashr_v_s_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-NEXT: s_load_dword s4, s[4:5], 0x34
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v1, s4, v1
+; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ashr_v_s_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v1, s4, v1
+; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
@@ -72,11 +273,90 @@ define amdgpu_kernel void @ashr_v_s_v2i16(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}ashr_s_v_v2i16:
-; GFX9: s_load_dword [[LHS:s[0-9]+]]
-; GFX9: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
-; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @ashr_s_v_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in, <2 x i16> %sgpr) #0 {
+; GFX9-LABEL: ashr_s_v_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dword s6, s[4:5], 0x34
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v1, v1, s6
+; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: ashr_s_v_v2i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_lshr_b32 s1, s4, 16
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_e64 v4, v3, s4
+; VI-NEXT: v_ashrrev_i16_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: ashr_s_v_v2i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_load_dword s8, s[4:5], 0xd
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; CI-NEXT: s_ashr_i32 s4, s8, 16
+; CI-NEXT: s_sext_i32_i16 s5, s8
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; CI-NEXT: v_ashr_i32_e32 v2, s5, v2
+; CI-NEXT: v_ashr_i32_e32 v3, s4, v3
+; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v2, v2, v3
+; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ashr_s_v_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-NEXT: s_load_dword s4, s[4:5], 0x34
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v1, v1, s4
+; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ashr_s_v_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v1, v1, s4
+; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
@@ -87,10 +367,82 @@ define amdgpu_kernel void @ashr_s_v_v2i16(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}ashr_imm_v_v2i16:
-; GCN: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
-; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], -4
define amdgpu_kernel void @ashr_imm_v_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GFX9-LABEL: ashr_imm_v_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v1, v1, -4 op_sel_hi:[1,0]
+; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: ashr_imm_v_v2i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: v_mov_b32_e32 v4, -4
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_e64 v2, v3, -4
+; VI-NEXT: v_ashrrev_i16_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v2, v2, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: ashr_imm_v_v2i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; CI-NEXT: v_ashr_i32_e32 v2, -4, v2
+; CI-NEXT: v_ashr_i32_e32 v3, -4, v3
+; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v2, v2, v3
+; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ashr_imm_v_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v1, v1, -4 op_sel_hi:[1,0]
+; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ashr_imm_v_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v1, v1, -4 op_sel_hi:[1,0]
+; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
@@ -101,10 +453,80 @@ define amdgpu_kernel void @ashr_imm_v_v2i16(ptr addrspace(1) %out, ptr addrspace
ret void
}
-; GCN-LABEL: {{^}}ashr_v_imm_v2i16:
-; GCN: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
-; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], 8, [[LHS]]
define amdgpu_kernel void @ashr_v_imm_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GFX9-LABEL: ashr_v_imm_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]
+; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: ashr_v_imm_v2i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1]
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v2, 8
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_sdwa v2, v2, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v2, sext(v3), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: ashr_v_imm_v2i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_bfe_i32 v3, v2, 0, 16
+; CI-NEXT: v_ashrrev_i32_e32 v2, 24, v2
+; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_bfe_u32 v3, v3, 8, 16
+; CI-NEXT: v_or_b32_e32 v2, v3, v2
+; CI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ashr_v_imm_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]
+; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ashr_v_imm_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]
+; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %in, i64 %tid.ext
@@ -115,20 +537,97 @@ define amdgpu_kernel void @ashr_v_imm_v2i16(ptr addrspace(1) %out, ptr addrspace
ret void
}
-; GCN-LABEL: {{^}}v_ashr_v4i16:
-; GCN: {{buffer|flat|global}}_load_dwordx4
-; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-
-; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-
-; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @v_ashr_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GFX9-LABEL: v_ashr_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v1, v3, v1
+; GFX9-NEXT: v_pk_ashrrev_i16 v0, v2, v0
+; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: v_ashr_v4i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; VI-NEXT: v_mov_b32_e32 v5, s1
+; VI-NEXT: v_add_u32_e32 v4, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_e32 v6, v3, v1
+; VI-NEXT: v_ashrrev_i16_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_ashrrev_i16_e32 v3, v2, v0
+; VI-NEXT: v_ashrrev_i16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_or_b32_e32 v1, v6, v1
+; VI-NEXT: v_or_b32_e32 v0, v3, v0
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: v_ashr_v4i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; CI-NEXT: v_mov_b32_e32 v5, 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: buffer_load_dwordx4 v[0:3], v[4:5], s[4:7], 0 addr64
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_bfe_i32 v6, v0, 0, 16
+; CI-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; CI-NEXT: v_bfe_i32 v7, v1, 0, 16
+; CI-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; CI-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; CI-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; CI-NEXT: v_ashrrev_i32_e32 v1, v9, v1
+; CI-NEXT: v_ashrrev_i32_e32 v3, v3, v7
+; CI-NEXT: v_ashrrev_i32_e32 v0, v8, v0
+; CI-NEXT: v_ashrrev_i32_e32 v2, v2, v6
+; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; CI-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; CI-NEXT: v_or_b32_e32 v1, v3, v1
+; CI-NEXT: v_or_b32_e32 v0, v2, v0
+; CI-NEXT: buffer_store_dwordx2 v[0:1], v[4:5], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: v_ashr_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v1, v3, v1
+; GFX10-NEXT: v_pk_ashrrev_i16 v0, v2, v0
+; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: v_ashr_v4i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b128 v[0:3], v4, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v1, v3, v1
+; GFX11-NEXT: v_pk_ashrrev_i16 v0, v2, v0
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %in, i64 %tid.ext
@@ -141,12 +640,90 @@ define amdgpu_kernel void @v_ashr_v4i16(ptr addrspace(1) %out, ptr addrspace(1)
ret void
}
-; GCN-LABEL: {{^}}ashr_v_imm_v4i16:
-; GCN: {{buffer|flat|global}}_load_dwordx2
-; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
-; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
-; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @ashr_v_imm_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GFX9-LABEL: ashr_v_imm_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]
+; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; VI-LABEL: ashr_v_imm_v4i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: v_mov_b32_e32 v4, 8
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_ashrrev_i16_sdwa v5, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_ashrrev_i16_sdwa v4, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v1, sext(v1), v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT: s_endpgm
+;
+; CI-LABEL: ashr_v_imm_v4i16:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, 0
+; CI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; CI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_bfe_i32 v6, v3, 0, 16
+; CI-NEXT: v_ashr_i64 v[3:4], v[2:3], 56
+; CI-NEXT: v_bfe_i32 v5, v2, 0, 16
+; CI-NEXT: v_ashrrev_i32_e32 v2, 24, v2
+; CI-NEXT: v_bfe_u32 v4, v6, 8, 16
+; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_bfe_u32 v5, v5, 8, 16
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v3, v4, v3
+; CI-NEXT: v_or_b32_e32 v2, v5, v2
+; CI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; CI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ashr_v_imm_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]
+; GFX10-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ashr_v_imm_v4i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[0:1], v2, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%in.gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %in, i64 %tid.ext
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
index e7f4843..198bf83 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -3414,54 +3416,103 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; --------------------------------------------------------------------
define half @buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v5, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v2, v5, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, s4, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v1, v1, v0
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, s4, v1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v1, v2, s6, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v2, v5, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v2, v5, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, v1, v0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -3497,47 +3548,89 @@ define half @buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, s6, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: v_mov_b32_e32 v5, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v2, v5, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, s4, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v1, v1, v0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, s4, v1
-; GFX11-NEXT: v_and_or_b32 v1, v2, s6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, v5, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v2, v5, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, v1, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -3764,53 +3857,101 @@ define half @buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__amdgpu_no_fine_gr
}
define void @buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v2, v3, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, s4, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v1, v1, v0
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, s4, v1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v1, v2, s6, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v2, v3, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v2, v3, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, v1, v0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -3845,46 +3986,87 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v2, v3, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, s4, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v1, v1, v0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, s4, v1
-; GFX11-NEXT: v_and_or_b32 v1, v2, s6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, v3, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v2, v3, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, v1, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4103,86 +4285,167 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f16__offset__amdgpu_no_fine_
}
define half @buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v6, 0x200, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v6
-; GFX12-NEXT: v_and_b32_e32 v10, -4, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v7, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v11, v7
-; GFX12-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v7, v10, s[4:7], null offen
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB15_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB15_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v6, v4, v7
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v6, v6, v5
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, v4, v6
-; GFX12-NEXT: v_and_or_b32 v6, v7, v11, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
-; GFX12-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[8:9], v10, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v8, v7
-; GFX12-NEXT: v_mov_b32_e32 v7, v8
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB15_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v4, v8
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x200, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v6
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v10, -4, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v7, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v11, v7
+; GFX12-TRUE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v10, s[4:7], null offen
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v6, v4, v7
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v6.l, v6.l, v5.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, v4, v6
+; GFX12-TRUE16-NEXT: v_and_or_b32 v6, v7, v11, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
+; GFX12-TRUE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[8:9], v10, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v8, v7
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v8
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v4, v8
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x200, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v6
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v10, -4, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v7, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v11, v7
+; GFX12-FAKE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v10, s[4:7], null offen
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v6, v4, v7
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, v4, v6
+; GFX12-FAKE16-NEXT: v_and_or_b32 v6, v7, v11, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
+; GFX12-FAKE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[8:9], v10, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v8, v7
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v8
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v4, v8
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4251,82 +4514,159 @@ define half @buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v4, v8
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x200, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v6
-; GFX11-NEXT: v_and_b32_e32 v10, -4, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v7, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v11, v7
-; GFX11-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v7, v10, s[4:7], 0 offen
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB15_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, v4, v7
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v6, v6, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, v4, v6
-; GFX11-NEXT: v_and_or_b32 v6, v7, v11, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
-; GFX11-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[8:9], v10, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v8, v7
-; GFX11-NEXT: v_mov_b32_e32 v7, v8
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB15_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v4, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x200, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, -4, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v7, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v11, v7
+; GFX11-TRUE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v10, s[4:7], 0 offen
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, v4, v7
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v6.l, v6.l, v5.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, v4, v6
+; GFX11-TRUE16-NEXT: v_and_or_b32 v6, v7, v11, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
+; GFX11-TRUE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[8:9], v10, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v8, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v8
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v4, v8
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x200, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, -4, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v7, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v11, v7
+; GFX11-FAKE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v10, s[4:7], 0 offen
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, v4, v7
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, v4, v6
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, v7, v11, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
+; GFX11-FAKE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[8:9], v10, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v8, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v8
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v4, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4745,64 +5085,124 @@ define half @buffer_fat_ptr_agent_atomic_fadd_ret_f16__offset__waterfall__amdgpu
; --------------------------------------------------------------------
define bfloat @buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: v_add_f32_e32 v0, v0, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX12-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB16_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4846,57 +5246,110 @@ define bfloat @buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine
; GFX942-NEXT: v_lshrrev_b32_e32 v0, s6, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB16_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v2
+; GFX11-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5151,63 +5604,122 @@ define bfloat @buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__amdgpu_no_fine
}
define void @buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v2, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: v_add_f32_e32 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX12-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v4
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB17_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5250,56 +5762,108 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v4
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB17_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5546,97 +6110,190 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_bf16__offset__amdgpu_no_fine
}
define bfloat @buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX12-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v9, v6
-; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB18_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB18_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB18_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_add_f32_e32 v4, v4, v10
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX12-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB18_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB18_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-TRUE16-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB18_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB18_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, v4, v10
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v7, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-FAKE16-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB18_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB18_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, v4, v10
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5713,94 +6370,184 @@ define bfloat @buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amd
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v8, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX11-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v9, v6
-; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB18_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB18_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB18_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v4, v4, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX11-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB18_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB18_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-TRUE16-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB18_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB18_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v7, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-FAKE16-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB18_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB18_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8338,58 +9085,113 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX11-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8721,54 +9523,105 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v5
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_add_f32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9149,91 +10002,176 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_mov_b32_e32 v0, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB28_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB28_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v9 :: v_dual_add_f32 v4, v4, v8
-; GFX11-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB28_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX11-TRUE16-NEXT: .LBB28_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB28_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v8 :: v_dual_add_f32 v4, v4, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB28_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB28_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v9 :: v_dual_add_f32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9755,58 +10693,113 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX11-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset:
; GFX10: ; %bb.0:
@@ -10138,54 +11131,105 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v5
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_add_f32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset:
; GFX10: ; %bb.0:
@@ -10514,58 +11558,113 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX11-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -10897,54 +11996,105 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v5
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_add_f32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -11272,54 +12422,105 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v5
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_add_f32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
index b044719..bee2813 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -2482,56 +2484,107 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; --------------------------------------------------------------------
define half @buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_max_num_f16_e32 v5, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
-; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB10_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v2, v5, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v1.l, v1.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v0, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v0, v0, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -2569,50 +2622,95 @@ define half @buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, s6, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_max_f16_e32 v5, v0, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
-; GFX11-NEXT: v_max_f16_e32 v0, v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB10_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, v5, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -2847,55 +2945,105 @@ define half @buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__amdgpu_no_fine_gr
}
define void @buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_max_num_f16_e32 v3, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v2, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
-; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v4
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB11_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v2, v3, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v1.l, v1.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v0, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -2932,49 +3080,93 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_max_f16_e32 v3, v0, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
-; GFX11-NEXT: v_max_f16_e32 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v4
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB11_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, v3, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -3201,89 +3393,172 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f16__offset__amdgpu_no_fine_
}
define half @buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX12-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v9, v6
-; GFX12-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_max_num_f16_e32 v10, v5, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB12_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB12_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v10
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB12_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v10, -4, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 3, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v9, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v11, v6
+; GFX12-TRUE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v10, s[4:7], null offen
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v9, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v4.h, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v9, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v11, v5
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v8, v6 :: v_dual_mov_b32 v7, v5
+; GFX12-TRUE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[7:8], v10, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v7, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v9, v7
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-FAKE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v10, v5, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v10
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -3354,85 +3629,164 @@ define half @buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v8, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX11-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v9, v6
-; GFX11-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_max_f16_e32 v10, v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB12_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB12_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB12_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, -4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 3, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v9, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v11, v6
+; GFX11-TRUE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v10, s[4:7], 0 offen
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v5.l, v5.l
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v9, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v4.h, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v9, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v11, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v6 :: v_dual_mov_b32 v7, v5
+; GFX11-TRUE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[7:8], v10, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v7, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v9, v7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-FAKE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v10, v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -3859,64 +4213,124 @@ define half @buffer_fat_ptr_agent_atomic_fmax_ret_f16__offset__waterfall__amdgpu
; --------------------------------------------------------------------
define bfloat @buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX12-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v0, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v0, v0, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -3960,57 +4374,110 @@ define bfloat @buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine
; GFX942-NEXT: v_lshrrev_b32_e32 v0, s6, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v0, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v2
+; GFX11-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v0, v0, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4267,63 +4734,122 @@ define bfloat @buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__amdgpu_no_fine
}
define void @buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v2, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX12-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v4
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4366,56 +4892,108 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v4
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4664,97 +5242,190 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_bf16__offset__amdgpu_no_fine
}
define bfloat @buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX12-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v9, v6
-; GFX12-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB15_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB15_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_max_num_f32_e32 v4, v4, v10
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX12-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB15_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-TRUE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v10
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v7, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-FAKE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v10
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4831,94 +5502,184 @@ define bfloat @buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amd
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v8, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX11-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v9, v6
-; GFX11-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB15_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_max_f32_e32 v4, v4, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX11-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB15_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-TRUE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v7, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-FAKE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6614,61 +7375,120 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; --------------------------------------------------------------------
define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v0
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v1, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX12-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX12-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX12-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v1, v1, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX12-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v1, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v0, v0, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6715,58 +7535,113 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX11-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7039,56 +7914,109 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
}
define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB20_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_max_num_f32 v5, v5, v3 :: v_dual_max_num_f32 v0, v0, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX12-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v5
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB20_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_max_num_f32 v0, v0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX12-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v3 :: v_dual_max_num_f32 v0, v0, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7134,54 +8062,105 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v3 :: v_dual_max_f32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v5
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB20_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_max_f32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v3 :: v_dual_max_f32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7450,95 +8429,186 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
}
define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-NEXT: ; implicit-def: $vgpr4
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB21_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX12-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB21_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_max_num_f32 v5, v5, v9 :: v_dual_max_num_f32 v4, v4, v8
-; GFX12-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB21_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB21_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
+; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v8 :: v_dual_max_num_f32 v4, v4, v9
+; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
+; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v9 :: v_dual_max_num_f32 v4, v4, v8
+; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7618,91 +8688,176 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_mov_b32_e32 v0, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB21_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB21_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v9 :: v_dual_max_f32 v4, v4, v8
-; GFX11-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB21_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB21_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX11-TRUE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v8 :: v_dual_max_f32 v4, v4, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v9 :: v_dual_max_f32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
index e33c8aa..1826743 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -2482,56 +2484,107 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; --------------------------------------------------------------------
define half @buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_max_num_f16_e32 v5, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
-; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB10_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v2, v5, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v1.l, v1.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v1.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v0, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v0, v0, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -2569,50 +2622,95 @@ define half @buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, s6, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_max_f16_e32 v5, v0, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
-; GFX11-NEXT: v_min_f16_e32 v0, v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB10_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, v5, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v1.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[3:4], v5, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -2847,55 +2945,105 @@ define half @buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__amdgpu_no_fine_gr
}
define void @buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_max_num_f16_e32 v3, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v2, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
-; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v4
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB11_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v2, v3, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v1.l, v1.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v1.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v0, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -2932,49 +3080,93 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_max_f16_e32 v3, v0, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
-; GFX11-NEXT: v_min_f16_e32 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v4
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB11_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, v3, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, s4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v1.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, s4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -3201,89 +3393,172 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f16__offset__amdgpu_no_fine_
}
define half @buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, half %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX12-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v9, v6
-; GFX12-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_max_num_f16_e32 v10, v5, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB12_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB12_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-NEXT: v_min_num_f16_e32 v4, v4, v10
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB12_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v10, -4, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 3, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v9, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v11, v6
+; GFX12-TRUE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v10, s[4:7], null offen
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v9, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v4.h, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v9, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v11, v5
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v8, v6 :: v_dual_mov_b32 v7, v5
+; GFX12-TRUE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[7:8], v10, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v7, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v9, v7
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-FAKE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v10, v5, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, v4, v10
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -3354,85 +3629,164 @@ define half @buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v8, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX11-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v9, v6
-; GFX11-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_max_f16_e32 v10, v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB12_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB12_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-NEXT: v_min_f16_e32 v4, v4, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB12_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, -4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 3, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v9, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v11, v6
+; GFX11-TRUE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v10, s[4:7], 0 offen
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v5.l, v5.l
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v9, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v4.h, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v9, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v11, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v6 :: v_dual_mov_b32 v7, v5
+; GFX11-TRUE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[7:8], v10, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v7, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v9, v7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-FAKE16-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v10, v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB12_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB12_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, v4, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB12_4: ; Parent Loop BB12_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB12_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -3859,64 +4213,124 @@ define half @buffer_fat_ptr_agent_atomic_fmin_ret_f16__offset__waterfall__amdgpu
; --------------------------------------------------------------------
define bfloat @buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX12-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v0, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v0, v0, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -3960,57 +4374,110 @@ define bfloat @buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine
; GFX942-NEXT: v_lshrrev_b32_e32 v0, s6, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v0, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v2
+; GFX11-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v4, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v0, v0, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4267,63 +4734,122 @@ define bfloat @buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__amdgpu_no_fine
}
define void @buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_addk_co_i32 s16, 0x200
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s4, s16, -4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v2, s4
-; GFX12-NEXT: s_and_b32 s4, s16, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s4, s4, 3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX12-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
-; GFX12-NEXT: s_not_b32 s6, s5
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX12-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v4
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX12-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_addk_co_i32 s16, 0x200
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX12-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], null offen
+; GFX12-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4366,56 +4892,108 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_addk_i32 s16, 0x200
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
-; GFX11-NEXT: s_and_b32 s4, s16, -4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, s4
-; GFX11-NEXT: s_and_b32 s4, s16, 3
-; GFX11-NEXT: s_lshl_b32 s4, s4, 3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_lshl_b32 s5, 0xffff, s4
-; GFX11-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
-; GFX11-NEXT: s_not_b32 s6, s5
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, s4, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, s4, v0
-; GFX11-NEXT: v_and_or_b32 v0, v1, s6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v4
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-TRUE16-NEXT: s_not_b32 s6, s5
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, s4, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_addk_i32 s16, 0x200
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, -4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, 0xffff, s4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v2, s[0:3], 0 offen
+; GFX11-FAKE16-NEXT: s_not_b32 s6, s5
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s4, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, s4, v0
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, v1, s6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v2, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4664,97 +5242,190 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_bf16__offset__amdgpu_no_fine
}
define bfloat @buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX12-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v9, v6
-; GFX12-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB15_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB15_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_min_num_f32_e32 v4, v4, v10
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX12-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB15_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-TRUE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v10
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v7, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX12-FAKE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], null offen
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v10
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4831,94 +5502,184 @@ define bfloat @buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amd
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v8, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_b32_e32 v6, 3, v4
-; GFX11-NEXT: v_and_b32_e32 v8, -4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v9, v6
-; GFX11-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB15_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v7, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_min_f32_e32 v4, v4, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v7, v4
-; GFX11-NEXT: v_and_or_b32 v5, v6, v9, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB15_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v7, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-TRUE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v7, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x200, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 3, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, -4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v6, v7, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v9, v6
+; GFX11-FAKE16-NEXT: .LBB15_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v8, s[4:7], 0 offen
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB15_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v7, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v7, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v9, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB15_4: ; Parent Loop BB15_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v8, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB15_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v7, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6614,61 +7375,120 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; --------------------------------------------------------------------
define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v4, s4
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v0
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v1, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX12-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX12-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX12-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX12-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v1, v1, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX12-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v1, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v0, v0, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6715,58 +7535,113 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: v_mov_b32_e32 v4, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
-; GFX11-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v7, v9 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v5, v8, s4
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7039,56 +7914,109 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
}
define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
-; GFX12-NEXT: s_mov_b32 s5, 0
-; GFX12-NEXT: .LBB20_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_min_num_f32 v5, v5, v3 :: v_dual_min_num_f32 v0, v0, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX12-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v5
-; GFX12-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_cbranch_execnz .LBB20_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_min_num_f32 v0, v0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX12-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v3 :: v_dual_min_num_f32 v0, v0, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX12-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7134,54 +8062,105 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_min_f32 v5, v5, v3 :: v_dual_min_f32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s4, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v5
-; GFX11-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_cbranch_execnz .LBB20_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_min_f32 v0, v0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v3 :: v_dual_min_f32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s4, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v6, v8, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB20_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7450,95 +8429,186 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
}
define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
-; GFX12-NEXT: s_mov_b32 s1, exec_lo
-; GFX12-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-NEXT: ; implicit-def: $vgpr4
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB21_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX12-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB21_3: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Loop Header: Depth=1
-; GFX12-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_min_num_f32 v5, v5, v9 :: v_dual_min_num_f32 v4, v4, v8
-; GFX12-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
-; GFX12-NEXT: v_mov_b32_e32 v5, v6
-; GFX12-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
-; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX12-NEXT: v_readfirstlane_b32 s4, v0
-; GFX12-NEXT: v_readfirstlane_b32 s5, v1
-; GFX12-NEXT: v_readfirstlane_b32 s6, v2
-; GFX12-NEXT: v_readfirstlane_b32 s7, v3
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB21_4
-; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB21_3
-; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v4
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
+; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX12-TRUE16-NEXT: ; %bb.2:
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX12-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX12-TRUE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v8 :: v_dual_min_num_f32 v4, v4, v9
+; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
+; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX12-FAKE16-NEXT: ; %bb.2:
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v9 :: v_dual_min_num_f32 v4, v4, v8
+; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX12-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX12-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7618,91 +8688,176 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_mov_b32_e32 v0, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB21_1
-; GFX11-NEXT: ; %bb.2:
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB21_3: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Loop Header: Depth=1
-; GFX11-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX11-NEXT: s_mov_b32 s2, exec_lo
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_min_f32 v5, v5, v9 :: v_dual_min_f32 v4, v4, v8
-; GFX11-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
-; GFX11-NEXT: v_mov_b32_e32 v5, v6
-; GFX11-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
-; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
-; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
-; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB21_4
-; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
-; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB21_3
-; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX11-TRUE16-NEXT: ; %bb.2:
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX11-TRUE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v8 :: v_dual_min_f32 v4, v4, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-TRUE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
+; GFX11-FAKE16-NEXT: ; %bb.2:
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB21_3: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v9 :: v_dual_min_f32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
+; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[0:1]
+; GFX11-FAKE16-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[2:3]
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
+; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
+; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
index 96603c1..de53982 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
@@ -18,7 +18,7 @@ target triple = "amdgcn--"
declare void @llvm.memcpy.p7.p7.i32(ptr addrspace(7), ptr addrspace(7), i32, i1)
-define amdgpu_kernel void @memcpy_known(ptr addrspace(7) inreg %src, ptr addrspace(7) inreg %dst) {
+define amdgpu_kernel void @memcpy_known(ptr addrspace(7) %src, ptr addrspace(7) %dst) {
; SDAG-LABEL: memcpy_known:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -235,14 +235,7 @@ define amdgpu_kernel void @memcpy_known(ptr addrspace(7) inreg %src, ptr addrspa
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_setpc_b64 s[30:31]
; SDAG-GFX942-LABEL: memcpy_known:
-; SDAG-GFX942: ; %bb.3:
-; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
-; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x10
-; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX942-NEXT: s_branch .LBB0_0
-; SDAG-GFX942-NEXT: .p2align 8
-; SDAG-GFX942-NEXT: ; %bb.4:
-; SDAG-GFX942-NEXT: .LBB0_0:
+; SDAG-GFX942: ; %bb.0:
; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; SDAG-GFX942-NEXT: s_load_dword s17, s[4:5], 0x34
; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x44
@@ -594,7 +587,7 @@ define amdgpu_kernel void @memcpy_known(ptr addrspace(7) inreg %src, ptr addrspa
ret void
}
-define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) inreg %src, ptr addrspace(7) inreg %dst) {
+define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrspace(7) %dst) {
; SDAG-LABEL: memcpy_known_medium:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -802,14 +795,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) inreg %src, ptr
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_setpc_b64 s[30:31]
; SDAG-GFX942-LABEL: memcpy_known_medium:
-; SDAG-GFX942: ; %bb.3:
-; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
-; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x10
-; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX942-NEXT: s_branch .LBB1_0
-; SDAG-GFX942-NEXT: .p2align 8
-; SDAG-GFX942-NEXT: ; %bb.4:
-; SDAG-GFX942-NEXT: .LBB1_0:
+; SDAG-GFX942: ; %bb.0:
; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; SDAG-GFX942-NEXT: s_load_dword s13, s[4:5], 0x34
; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x44
@@ -1154,7 +1140,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) inreg %src, ptr
ret void
}
-define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) inreg %src, ptr addrspace(7) inreg %dst) {
+define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) %src, ptr addrspace(7) %dst) {
; SDAG-LABEL: memcpy_known_small:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1191,14 +1177,7 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) inreg %src, ptr a
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: s_setpc_b64 s[30:31]
; SDAG-GFX942-LABEL: memcpy_known_small:
-; SDAG-GFX942: ; %bb.1:
-; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
-; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x10
-; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX942-NEXT: s_branch .LBB2_0
-; SDAG-GFX942-NEXT: .p2align 8
-; SDAG-GFX942-NEXT: ; %bb.2:
-; SDAG-GFX942-NEXT: .LBB2_0:
+; SDAG-GFX942: ; %bb.0:
; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; SDAG-GFX942-NEXT: s_load_dword s13, s[4:5], 0x34
; SDAG-GFX942-NEXT: s_mov_b32 s12, 0
diff --git a/llvm/test/CodeGen/AMDGPU/combine_vloads.ll b/llvm/test/CodeGen/AMDGPU/combine_vloads.ll
index e2e3704..f2c6ac2 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_vloads.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_vloads.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=r600-- -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
;
@@ -9,10 +10,123 @@
; 128-bit loads instead of many 8-bit
-; EG-LABEL: {{^}}combine_vloads:
-; EG: VTX_READ_128
-; EG: VTX_READ_128
define amdgpu_kernel void @combine_vloads(ptr addrspace(1) nocapture %src, ptr addrspace(1) nocapture %result) nounwind {
+; EG-LABEL: combine_vloads:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 3, @16, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @10
+; EG-NEXT: TEX 1 @12
+; EG-NEXT: ALU 86, @20, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T14.XY, T15.X, 0
+; EG-NEXT: ALU_PUSH_BEFORE 4, @107, KC0[], KC1[]
+; EG-NEXT: JUMP @9 POP:1
+; EG-NEXT: LOOP_BREAK @9
+; EG-NEXT: POP @9 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 12:
+; EG-NEXT: VTX_READ_128 T14.XYZW, T13.X, 0, #1
+; EG-NEXT: VTX_READ_128 T15.XYZW, T13.X, 16, #1
+; EG-NEXT: ALU clause starting at 16:
+; EG-NEXT: MOV T13.X, KC0[2].Y,
+; EG-NEXT: MOV T0.W, KC0[2].Z,
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 20:
+; EG-NEXT: LSHR T2.W, T14.Y, literal.x,
+; EG-NEXT: LSHR * T3.W, T14.W, literal.x,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T2.W, PV.W, PS,
+; EG-NEXT: LSHR * T3.W, T15.Y, literal.x,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T0.Y, T14.Y, literal.x,
+; EG-NEXT: LSHR T0.Z, T14.W, literal.x,
+; EG-NEXT: ADD_INT T2.W, PV.W, PS,
+; EG-NEXT: LSHR * T3.W, T15.W, literal.y,
+; EG-NEXT: 8(1.121039e-44), 24(3.363116e-44)
+; EG-NEXT: ADD_INT T16.X, PV.W, PS,
+; EG-NEXT: ADD_INT T0.Y, PV.Y, PV.Z,
+; EG-NEXT: LSHR T0.Z, T15.Y, literal.x,
+; EG-NEXT: LSHR T2.W, T14.X, literal.y,
+; EG-NEXT: LSHR * T3.W, T14.Z, literal.y,
+; EG-NEXT: 8(1.121039e-44), 24(3.363116e-44)
+; EG-NEXT: ADD_INT T17.X, PV.W, PS,
+; EG-NEXT: ADD_INT T0.Y, PV.Y, PV.Z,
+; EG-NEXT: LSHR T0.Z, T15.W, literal.x,
+; EG-NEXT: LSHR T2.W, T14.Y, literal.y,
+; EG-NEXT: LSHR * T3.W, T14.W, literal.y,
+; EG-NEXT: 8(1.121039e-44), 16(2.242078e-44)
+; EG-NEXT: LSHR T18.X, T15.X, literal.x,
+; EG-NEXT: LSHR T1.Y, T14.X, literal.y, BS:VEC_120/SCL_212
+; EG-NEXT: ADD_INT T1.Z, PV.W, PS,
+; EG-NEXT: LSHR T2.W, T15.Y, literal.z,
+; EG-NEXT: ADD_INT * T3.W, PV.Y, PV.Z,
+; EG-NEXT: 24(3.363116e-44), 8(1.121039e-44)
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T19.X, T14.Z, literal.x,
+; EG-NEXT: ADD_INT T0.Y, T14.Y, T14.W,
+; EG-NEXT: AND_INT T0.Z, PS, literal.y,
+; EG-NEXT: ADD_INT T2.W, PV.Z, PV.W,
+; EG-NEXT: LSHR * T3.W, T15.W, literal.z,
+; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43)
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T20.X, PV.W, PS,
+; EG-NEXT: LSHL T2.Y, PV.Z, literal.x,
+; EG-NEXT: ADD_INT T0.Z, PV.Y, T15.Y,
+; EG-NEXT: ADD_INT T2.W, T1.Y, PV.X,
+; EG-NEXT: LSHR * T3.W, T15.X, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T19.X, T14.X, T14.Z,
+; EG-NEXT: ADD_INT T0.Y, PV.W, PS,
+; EG-NEXT: LSHR T1.Z, T15.Z, literal.x,
+; EG-NEXT: LSHR T2.W, T14.X, literal.y,
+; EG-NEXT: LSHR * T3.W, T14.Z, literal.y,
+; EG-NEXT: 8(1.121039e-44), 16(2.242078e-44)
+; EG-NEXT: ADD_INT T14.X, PV.W, PS,
+; EG-NEXT: LSHR T1.Y, T15.X, literal.x,
+; EG-NEXT: ADD_INT T1.Z, PV.Y, PV.Z,
+; EG-NEXT: ADD_INT T2.W, PV.X, T15.X,
+; EG-NEXT: ADD_INT * T3.W, T0.Z, T15.W,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: AND_INT T15.X, PS, literal.x,
+; EG-NEXT: ADD_INT T0.Y, PV.W, T15.Z,
+; EG-NEXT: AND_INT T0.Z, PV.Z, literal.x,
+; EG-NEXT: ADD_INT T2.W, PV.X, PV.Y,
+; EG-NEXT: LSHR * T3.W, T15.Z, literal.y,
+; EG-NEXT: 255(3.573311e-43), 16(2.242078e-44)
+; EG-NEXT: ADD_INT T14.X, PV.W, PS,
+; EG-NEXT: LSHL T1.Y, PV.Z, literal.x,
+; EG-NEXT: AND_INT T0.Z, PV.Y, literal.y,
+; EG-NEXT: OR_INT T2.W, PV.X, T2.Y,
+; EG-NEXT: LSHL * T3.W, T20.X, literal.z,
+; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43)
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: OR_INT T15.X, PV.W, PS,
+; EG-NEXT: OR_INT T0.Y, PV.Z, PV.Y,
+; EG-NEXT: LSHL T0.Z, PV.X, literal.x,
+; EG-NEXT: ADD_INT T2.W, T17.X, T18.X,
+; EG-NEXT: LSHR * T3.W, T15.Z, literal.y,
+; EG-NEXT: 16(2.242078e-44), 24(3.363116e-44)
+; EG-NEXT: ADD_INT T1.Y, PV.W, PS,
+; EG-NEXT: OR_INT T0.Z, PV.Y, PV.Z,
+; EG-NEXT: AND_INT T2.W, PV.X, literal.x,
+; EG-NEXT: LSHL * T3.W, T16.X, literal.y,
+; EG-NEXT: 16777215(2.350989e-38), 24(3.363116e-44)
+; EG-NEXT: OR_INT T14.Y, PV.W, PS,
+; EG-NEXT: AND_INT T2.W, PV.Z, literal.x,
+; EG-NEXT: LSHL * T3.W, PV.Y, literal.y,
+; EG-NEXT: 16777215(2.350989e-38), 24(3.363116e-44)
+; EG-NEXT: OR_INT T14.X, PV.W, PS,
+; EG-NEXT: ADD_INT * T2.W, T0.W, T1.W,
+; EG-NEXT: LSHR * T15.X, PV.W, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: ALU clause starting at 107:
+; EG-NEXT: ADD_INT * T1.W, T1.W, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: SETE_INT * T2.W, PV.W, literal.x,
+; EG-NEXT: 8192(1.147944e-41), 0(0.000000e+00)
+; EG-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
entry:
br label %for.body
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
index fdc15a3..e13c895 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -8147,50 +8149,95 @@ define void @flat_agent_atomic_fadd_noret_f64__offset12b_neg__amdgpu_no_fine_gra
; --------------------------------------------------------------------
define half @flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8225,45 +8272,85 @@ define half @flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory(ptr %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8436,51 +8523,97 @@ define half @flat_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory(ptr %
}
define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8517,46 +8650,87 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grain
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8735,51 +8909,97 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grain
}
define half @flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8817,46 +9037,87 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grain
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9035,48 +9296,91 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grain
}
define void @flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9110,43 +9414,81 @@ define void @flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory(ptr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9313,49 +9655,93 @@ define void @flat_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory(ptr
}
define void @flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9391,44 +9777,83 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_gra
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9601,49 +10026,93 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_gra
}
define void @flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9680,44 +10149,83 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9890,37 +10398,69 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_gra
}
define void @flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v4, v[0:1] offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB42_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9946,32 +10486,59 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v4, v[0:1] offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB42_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10105,39 +10672,73 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_
}
define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB43_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10164,34 +10765,63 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fi
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB43_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10328,52 +10958,99 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fi
}
define half @flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB44_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10410,46 +11087,87 @@ define half @flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grai
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB44_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10630,50 +11348,95 @@ define half @flat_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grai
}
define void @flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB45_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10709,44 +11472,83 @@ define void @flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB45_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10925,59 +11727,114 @@ define void @flat_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_gr
; --------------------------------------------------------------------
define bfloat @flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB46_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB46_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11021,54 +11878,104 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory(pt
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB46_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB46_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11268,61 +12175,118 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory(pt
}
define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB47_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB47_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11368,56 +12332,108 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB47_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB47_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11623,61 +12639,118 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_gr
}
define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB48_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11724,56 +12797,108 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB48_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11979,59 +13104,114 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_gr
}
define void @flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB49_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12076,54 +13256,104 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB49_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -12323,59 +13553,114 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_gr
}
define void @flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB50_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12421,54 +13706,104 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB50_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -12668,49 +14003,94 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_gr
}
define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB51_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12747,44 +14127,84 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB51_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -12954,47 +14374,90 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no
}
define void @flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB52_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -13030,42 +14493,80 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB52_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -13232,57 +14733,110 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no
}
define void @flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB53_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -13325,52 +14879,100 @@ define void @flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory(pt
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB53_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -13564,62 +15166,120 @@ define void @flat_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory(pt
}
define bfloat @flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -13665,56 +15325,108 @@ define bfloat @flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -13922,60 +15634,116 @@ define bfloat @flat_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_g
}
define void @flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14020,54 +15788,104 @@ define void @flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16574,54 +18392,104 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_m
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB68_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB68_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB68_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB68_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB68_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB68_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16850,54 +18718,104 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB69_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB69_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB69_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB69_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB69_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB69_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -17132,59 +19050,113 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: flat_load_b32 v0, v[4:5]
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB70_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB70_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB70_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB70_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB70_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB70_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -17422,52 +19394,100 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB71_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB71_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB71_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB71_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB71_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB71_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -17690,52 +19710,100 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB72_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB72_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB72_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB72_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -17968,57 +20036,110 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: flat_load_b32 v3, v[3:4]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB73_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB73_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB73_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB73_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB73_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB73_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -18257,54 +20378,104 @@ define <2 x bfloat> @flat_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_n
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB74_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB74_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB74_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB74_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB74_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB74_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -18539,52 +20710,100 @@ define void @flat_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB75_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB75_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB75_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB75_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB75_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB75_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -18816,54 +21035,104 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory(
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB76_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB76_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB76_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB76_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB76_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB76_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -19092,52 +21361,100 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory(ptr %p
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB77_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB77_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB77_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB77_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB77_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB77_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -19360,54 +21677,104 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_m
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB78_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB78_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB78_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB78_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB78_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB78_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -19636,52 +22003,100 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory_
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB79_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB79_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB79_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB79_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB79_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB79_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll
index b29a5d0..d2cbc25 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -6019,52 +6021,99 @@ define double @flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory__am
; --------------------------------------------------------------------
define half @flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6101,47 +6150,89 @@ define half @flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory(ptr %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6322,53 +6413,103 @@ define half @flat_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory(ptr %
}
define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6407,48 +6548,93 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grain
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6635,53 +6821,103 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grain
}
define half @flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB28_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6721,48 +6957,93 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grain
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6949,51 +7230,97 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grain
}
define void @flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB29_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v3.l, v3.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7029,46 +7356,87 @@ define void @flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory(ptr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v3.l, v3.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7243,52 +7611,101 @@ define void @flat_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory(ptr
}
define void @flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7326,47 +7743,91 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_gra
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7547,52 +8008,101 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_gra
}
define void @flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB31_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7631,47 +8141,91 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7852,41 +8406,77 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_gra
}
define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v4, v4
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB32_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v4, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7915,36 +8505,67 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fi
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v4, v4
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v4, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8089,40 +8710,75 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fi
}
define void @flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v4, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v4
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB33_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8150,35 +8806,65 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_max_f16_e32 v4, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f16_e32 v2, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8320,54 +9006,105 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_
}
define half @flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB34_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8406,48 +9143,93 @@ define half @flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grai
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8636,53 +9418,103 @@ define half @flat_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grai
}
define void @flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB35_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8720,47 +9552,91 @@ define void @flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB35_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8947,59 +9823,114 @@ define void @flat_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_gr
; --------------------------------------------------------------------
define bfloat @flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9043,54 +9974,104 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory(pt
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9291,61 +10272,118 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory(pt
}
define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9391,56 +10429,108 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9647,61 +10737,118 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_gr
}
define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9748,56 +10895,108 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10004,57 +11203,110 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_gr
}
define void @flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10097,52 +11349,100 @@ define void @flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory(pt
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10337,59 +11637,114 @@ define void @flat_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory(pt
}
define void @flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10434,54 +11789,104 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10682,59 +12087,114 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_gr
}
define void @flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10780,54 +12240,104 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11028,49 +12538,94 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_gr
}
define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB42_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11107,44 +12662,84 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB42_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11315,47 +12910,90 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no
}
define void @flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB43_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11391,42 +13029,80 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB43_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11594,62 +13270,120 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no
}
define bfloat @flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB44_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11695,56 +13429,108 @@ define bfloat @flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB44_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11953,60 +13739,116 @@ define bfloat @flat_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_g
}
define void @flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB45_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12051,54 +13893,104 @@ define void @flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB45_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14234,57 +16126,111 @@ define void @flat_system_atomic_fmax_noret_v2f16__offset12b_pos__amdgpu_no_fine_
; --------------------------------------------------------------------
define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14328,54 +16274,104 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_m
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14581,57 +16577,111 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_m
}
define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14675,54 +16725,104 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14931,57 +17031,111 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no
}
define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15032,59 +17186,113 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: flat_load_b32 v0, v[4:5]
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15299,55 +17507,107 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no
}
define void @flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15390,52 +17650,100 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15635,55 +17943,107 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory(
}
define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB58_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15726,52 +18086,100 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB58_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15978,55 +18386,107 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_
}
define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB59_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16075,57 +18535,110 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: flat_load_b32 v3, v[3:4]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB59_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16340,58 +18853,113 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_
}
define <2 x bfloat> @flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB60_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16435,54 +19003,104 @@ define <2 x bfloat> @flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_n
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB60_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16693,56 +19311,109 @@ define <2 x bfloat> @flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_n
}
define void @flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB61_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16785,52 +19456,100 @@ define void @flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB61_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll
index 9b682179..805848f 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -6019,52 +6021,99 @@ define double @flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory__am
; --------------------------------------------------------------------
define half @flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6101,47 +6150,89 @@ define half @flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory(ptr %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6322,53 +6413,103 @@ define half @flat_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory(ptr %
}
define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6407,48 +6548,93 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grain
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6635,53 +6821,103 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grain
}
define half @flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB28_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6721,48 +6957,93 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grain
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6949,51 +7230,97 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grain
}
define void @flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB29_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v3.l, v3.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7029,46 +7356,87 @@ define void @flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory(ptr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: v_min_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v3.l, v3.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7243,52 +7611,101 @@ define void @flat_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory(ptr
}
define void @flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7326,47 +7743,91 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_gra
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7547,52 +8008,101 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_gra
}
define void @flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB31_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7631,47 +8141,91 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7852,41 +8406,77 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_gra
}
define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v4, v4
-; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB32_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v4, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7915,36 +8505,67 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fi
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v4, v4
-; GFX11-NEXT: v_min_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v4, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8089,40 +8710,75 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fi
}
define void @flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v4, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v4
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB33_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8150,35 +8806,65 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_max_f16_e32 v4, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f16_e32 v2, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8320,54 +9006,105 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_
}
define half @flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB34_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8406,48 +9143,93 @@ define half @flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grai
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8636,53 +9418,103 @@ define half @flat_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grai
}
define void @flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB35_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8720,47 +9552,91 @@ define void @flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB35_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: flat_load_b32 v6, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8947,59 +9823,114 @@ define void @flat_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_gr
; --------------------------------------------------------------------
define bfloat @flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9043,54 +9974,104 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory(pt
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9291,61 +10272,118 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory(pt
}
define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9391,56 +10429,108 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9647,61 +10737,118 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_gr
}
define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9748,56 +10895,108 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10004,57 +11203,110 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_gr
}
define void @flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10097,52 +11349,100 @@ define void @flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory(pt
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10337,59 +11637,114 @@ define void @flat_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory(pt
}
define void @flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10434,54 +11789,104 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10682,59 +12087,114 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_gr
}
define void @flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10780,54 +12240,104 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_gr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11028,49 +12538,94 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_gr
}
define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB42_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11107,44 +12662,84 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB42_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11315,47 +12910,90 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no
}
define void @flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB43_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11391,42 +13029,80 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB43_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11594,62 +13270,120 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no
}
define bfloat @flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB44_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11695,56 +13429,108 @@ define bfloat @flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB44_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11953,60 +13739,116 @@ define bfloat @flat_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_g
}
define void @flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB45_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12051,54 +13893,104 @@ define void @flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB45_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14234,57 +16126,111 @@ define void @flat_system_atomic_fmin_noret_v2f16__offset12b_pos__amdgpu_no_fine_
; --------------------------------------------------------------------
define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14328,54 +16274,104 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_m
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14581,57 +16577,111 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_m
}
define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14675,54 +16725,104 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14931,57 +17031,111 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no
}
define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15032,59 +17186,113 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: flat_load_b32 v0, v[4:5]
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15299,55 +17507,107 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no
}
define void @flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15390,52 +17650,100 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15635,55 +17943,107 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory(
}
define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB58_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15726,52 +18086,100 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB58_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15978,55 +18386,107 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_
}
define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB59_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16075,57 +18535,110 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: flat_load_b32 v3, v[3:4]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB59_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16340,58 +18853,113 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_
}
define <2 x bfloat> @flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB60_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16435,54 +19003,104 @@ define <2 x bfloat> @flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_n
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB60_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16693,56 +19311,109 @@ define <2 x bfloat> @flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_n
}
define void @flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB61_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16785,52 +19456,100 @@ define void @flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB61_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
index 98d7d259..e0138d58 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -5832,50 +5834,95 @@ define void @flat_agent_atomic_fsub_noret_f64__offset12b_neg(ptr %ptr, double %v
; --------------------------------------------------------------------
define half @flat_agent_atomic_fsub_ret_f16(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB22_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_f16:
; GFX942: ; %bb.0:
@@ -5910,45 +5957,85 @@ define half @flat_agent_atomic_fsub_ret_f16(ptr %ptr, half %val) #0 {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB22_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_f16:
; GFX10: ; %bb.0:
@@ -6121,51 +6208,97 @@ define half @flat_agent_atomic_fsub_ret_f16(ptr %ptr, half %val) #0 {
}
define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB23_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -6202,46 +6335,87 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos(ptr %ptr, half %val)
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB23_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -6420,51 +6594,97 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos(ptr %ptr, half %val)
}
define half @flat_agent_atomic_fsub_ret_f16__offset12b_neg(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB24_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -6502,46 +6722,87 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_neg(ptr %ptr, half %val)
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -6720,48 +6981,91 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_neg(ptr %ptr, half %val)
}
define void @flat_agent_atomic_fsub_noret_f16(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB25_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_f16:
; GFX942: ; %bb.0:
@@ -6795,43 +7099,81 @@ define void @flat_agent_atomic_fsub_noret_f16(ptr %ptr, half %val) #0 {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB25_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_f16:
; GFX10: ; %bb.0:
@@ -6998,49 +7340,93 @@ define void @flat_agent_atomic_fsub_noret_f16(ptr %ptr, half %val) #0 {
}
define void @flat_agent_atomic_fsub_noret_f16__offset12b_pos(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -7076,44 +7462,83 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b_pos(ptr %ptr, half %val
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -7286,49 +7711,93 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b_pos(ptr %ptr, half %val
}
define void @flat_agent_atomic_fsub_noret_f16__offset12b_neg(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -7365,44 +7834,83 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b_neg(ptr %ptr, half %val
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -7575,39 +8083,73 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b_neg(ptr %ptr, half %val
}
define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB28_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
; GFX942: ; %bb.0:
@@ -7634,34 +8176,63 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr %ptr, hal
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
; GFX10: ; %bb.0:
@@ -7798,37 +8369,69 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr %ptr, hal
}
define void @flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v4, v[0:1] offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB29_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
; GFX942: ; %bb.0:
@@ -7854,32 +8457,59 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr %ptr, h
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v4, v[0:1] offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
; GFX10: ; %bb.0:
@@ -8013,52 +8643,99 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr %ptr, h
}
define half @flat_system_atomic_fsub_ret_f16__offset12b_pos(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -8095,46 +8772,87 @@ define half @flat_system_atomic_fsub_ret_f16__offset12b_pos(ptr %ptr, half %val)
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fsub_ret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -8315,50 +9033,95 @@ define half @flat_system_atomic_fsub_ret_f16__offset12b_pos(ptr %ptr, half %val)
}
define void @flat_system_atomic_fsub_noret_f16__offset12b_pos(ptr %ptr, half %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB31_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -8394,44 +9157,83 @@ define void @flat_system_atomic_fsub_noret_f16__offset12b_pos(ptr %ptr, half %va
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fsub_noret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -8610,59 +9412,114 @@ define void @flat_system_atomic_fsub_noret_f16__offset12b_pos(ptr %ptr, half %va
; --------------------------------------------------------------------
define bfloat @flat_agent_atomic_fsub_ret_bf16(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB32_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_bf16:
; GFX942: ; %bb.0:
@@ -8706,54 +9563,104 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16(ptr %ptr, bfloat %val) #0 {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_bf16:
; GFX10: ; %bb.0:
@@ -8953,61 +9860,118 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16(ptr %ptr, bfloat %val) #0 {
}
define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB33_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -9053,56 +10017,108 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos(ptr %ptr, bfloat %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -9308,61 +10324,118 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos(ptr %ptr, bfloat %
}
define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_neg(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB34_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -9409,56 +10482,108 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_neg(ptr %ptr, bfloat %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -9664,57 +10789,110 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_neg(ptr %ptr, bfloat %
}
define void @flat_agent_atomic_fsub_noret_bf16(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: flat_load_b32 v4, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB35_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_bf16:
; GFX942: ; %bb.0:
@@ -9757,52 +10935,100 @@ define void @flat_agent_atomic_fsub_noret_bf16(ptr %ptr, bfloat %val) #0 {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: flat_load_b32 v4, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB35_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v4, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_bf16:
; GFX10: ; %bb.0:
@@ -9996,59 +11222,114 @@ define void @flat_agent_atomic_fsub_noret_bf16(ptr %ptr, bfloat %val) #0 {
}
define void @flat_agent_atomic_fsub_noret_bf16__offset12b_pos(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -10093,54 +11374,104 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b_pos(ptr %ptr, bfloat %
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -10340,59 +11671,114 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b_pos(ptr %ptr, bfloat %
}
define void @flat_agent_atomic_fsub_noret_bf16__offset12b_neg(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -10438,54 +11824,104 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b_neg(ptr %ptr, bfloat %
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -10685,49 +12121,94 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b_neg(ptr %ptr, bfloat %
}
define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
; GFX942: ; %bb.0:
@@ -10764,44 +12245,84 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
; GFX10: ; %bb.0:
@@ -10971,47 +12492,90 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr %ptr,
}
define void @flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
; GFX942: ; %bb.0:
@@ -11047,42 +12611,80 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr %ptr,
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
; GFX10: ; %bb.0:
@@ -11249,62 +12851,120 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr %ptr,
}
define bfloat @flat_system_atomic_fsub_ret_bf16__offset12b_pos(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v5, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -11350,56 +13010,108 @@ define bfloat @flat_system_atomic_fsub_ret_bf16__offset12b_pos(ptr %ptr, bfloat
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v5, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v5, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fsub_ret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -11607,60 +13319,116 @@ define bfloat @flat_system_atomic_fsub_ret_bf16__offset12b_pos(ptr %ptr, bfloat
}
define void @flat_system_atomic_fsub_noret_bf16__offset12b_pos(ptr %ptr, bfloat %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -11705,54 +13473,104 @@ define void @flat_system_atomic_fsub_noret_bf16__offset12b_pos(ptr %ptr, bfloat
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fsub_noret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -13743,57 +15561,111 @@ define void @flat_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x h
; --------------------------------------------------------------------
define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB50_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
; GFX942: ; %bb.0:
@@ -13837,54 +15709,104 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16(ptr %ptr, <2 x bfloat> %v
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB50_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_v2bf16:
; GFX10: ; %bb.0:
@@ -14090,57 +16012,111 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16(ptr %ptr, <2 x bfloat> %v
}
define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB51_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -14184,54 +16160,104 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB51_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -14440,57 +16466,111 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr,
}
define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB52_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -14541,59 +16621,113 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr %ptr,
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
-; GFX11-NEXT: flat_load_b32 v0, v[4:5]
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB52_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-TRUE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v0, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v0, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v3
+; GFX11-FAKE16-NEXT: flat_load_b32 v0, v[4:5]
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v0, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v0, v[3:4], v[5:6] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -14808,55 +16942,107 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr %ptr,
}
define void @flat_agent_atomic_fsub_noret_v2bf16(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1]
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB53_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
; GFX942: ; %bb.0:
@@ -14899,52 +17085,100 @@ define void @flat_agent_atomic_fsub_noret_v2bf16(ptr %ptr, <2 x bfloat> %val) #0
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB53_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_v2bf16:
; GFX10: ; %bb.0:
@@ -15144,55 +17378,107 @@ define void @flat_agent_atomic_fsub_noret_v2bf16(ptr %ptr, <2 x bfloat> %val) #0
}
define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -15235,52 +17521,100 @@ define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x b
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -15487,55 +17821,107 @@ define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x b
}
define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -15584,57 +17970,110 @@ define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr %ptr, <2 x b
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: flat_load_b32 v3, v[3:4]
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[3:4]
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -15849,58 +18288,113 @@ define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr %ptr, <2 x b
}
define <2 x bfloat> @flat_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -15944,54 +18438,104 @@ define <2 x bfloat> @flat_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[5:6] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -16202,56 +18746,109 @@ define <2 x bfloat> @flat_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr,
}
define void @flat_system_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -16294,52 +18891,100 @@ define void @flat_system_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: flat_load_b32 v3, v[0:1] offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: flat_system_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
index 690e5cc..bcd5d1e 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
@@ -1,10 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck %s -check-prefixes=GFX942-SDAG
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck %s -check-prefixes=GFX942-GISEL
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GFX11-SDAG
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GFX11-GISEL
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s -check-prefixes=GFX12-SDAG
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s -check-prefixes=GFX12-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11-SDAG,GFX11-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11-SDAG,GFX11-SDAG-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11-GISEL,GFX11-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11-GISEL,GFX11-GISEL-FAKE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12-SDAG,GFX12-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12-SDAG,GFX12-SDAG-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12-GISEL,GFX12-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12-GISEL,GFX12-GISEL-FAKE16
; Test flat scratch SVS addressing mode with various combinations of alignment
; of soffset, voffset and inst_offset.
@@ -52,24 +56,45 @@ define amdgpu_kernel void @soff1_voff1(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff1_voff1:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v1, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff1_voff1:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 1, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v2, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v4, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff1_voff1:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 1, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff1_voff1:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -89,19 +114,35 @@ define amdgpu_kernel void @soff1_voff1(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff1_voff1:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff1_voff1:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff1_voff1:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff1_voff1:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -177,26 +218,49 @@ define amdgpu_kernel void @soff1_voff2(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff1_voff2:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mul_u32_u24_e32 v0, 2, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v1, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff1_voff2:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 1, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v2, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v4, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff1_voff2:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 1, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff1_voff2:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -219,21 +283,39 @@ define amdgpu_kernel void @soff1_voff2(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff1_voff2:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v0, 2, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff1_voff2:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v2, 2, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff1_voff2:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff1_voff2:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -310,26 +392,49 @@ define amdgpu_kernel void @soff1_voff4(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff1_voff4:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mul_u32_u24_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v1, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff1_voff4:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 1, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v2, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v4, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff1_voff4:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 1, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff1_voff4:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -352,21 +457,39 @@ define amdgpu_kernel void @soff1_voff4(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff1_voff4:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v0, 4, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff1_voff4:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v2, 4, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff1_voff4:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff1_voff4:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -443,26 +566,49 @@ define amdgpu_kernel void @soff2_voff1(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff2_voff1:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_lshl_b32 s0, s0, 1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v1, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff2_voff1:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 1, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v2, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v4, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff2_voff1:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 1, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff2_voff1:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -485,20 +631,37 @@ define amdgpu_kernel void @soff2_voff1(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff2_voff1:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v3, 4 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_lshl_b32 s0, s0, 1
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff2_voff1:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.h, 2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v1, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff2_voff1:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, 4 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff2_voff1:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -576,27 +739,51 @@ define amdgpu_kernel void @soff2_voff2(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff2_voff2:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_u32_u24_e32 v0, 2, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_lshl_b32 s0, s0, 1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff2_voff2:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, off offset:1 dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff2_voff2:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff2_voff2:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -621,22 +808,41 @@ define amdgpu_kernel void @soff2_voff2(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff2_voff2:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v0, 2, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_lshl_b32 s0, s0, 1
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff2_voff2:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v2, 2, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff2_voff2:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff2_voff2:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -716,27 +922,51 @@ define amdgpu_kernel void @soff2_voff4(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff2_voff4:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_u32_u24_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_lshl_b32 s0, s0, 1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff2_voff4:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, off offset:1 dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff2_voff4:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff2_voff4:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -761,22 +991,41 @@ define amdgpu_kernel void @soff2_voff4(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff2_voff4:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v0, 4, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_lshl_b32 s0, s0, 1
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff2_voff4:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v2, 4, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff2_voff4:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 1
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff2_voff4:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -855,26 +1104,49 @@ define amdgpu_kernel void @soff4_voff1(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff4_voff1:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_lshl_b32 s0, s0, 2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v1, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff4_voff1:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 1, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v2, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v4, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff4_voff1:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 1, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff4_voff1:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -897,20 +1169,37 @@ define amdgpu_kernel void @soff4_voff1(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff4_voff1:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v3, 4 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_lshl_b32 s0, s0, 2
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff4_voff1:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.h, 2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v1, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff4_voff1:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, 4 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff4_voff1:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -988,27 +1277,51 @@ define amdgpu_kernel void @soff4_voff2(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff4_voff2:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_u32_u24_e32 v0, 2, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_lshl_b32 s0, s0, 2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v4, 2, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v5, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v4, v2, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v5, v3, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff4_voff2:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 2, v2
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v4, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, off offset:1 dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v3, v0, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v4, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff4_voff2:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v4, 2, v0
+; GFX11-SDAG-FAKE16-NEXT: v_add_nc_u32_e32 v5, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v4, v2, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v5, v3, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff4_voff2:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -1033,22 +1346,41 @@ define amdgpu_kernel void @soff4_voff2(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff4_voff2:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v0, 2, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_lshl_b32 s0, s0, 2
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff4_voff2:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v2, 2, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff4_voff2:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 2, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff4_voff2:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -1127,26 +1459,49 @@ define amdgpu_kernel void @soff4_voff4(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff4_voff4:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, 4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_u32_u24_e32 v0, 4, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_lshl_b32 s0, s0, 2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_add_nc_u32 v3, 4, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v2, off offset:2 dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: scratch_store_b8 v3, v4, off dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff4_voff4:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v2, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_add_nc_u32_e32 v3, 4, v2
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, off offset:1 dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, off offset:2 dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v3, v1, off dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff4_voff4:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, 4
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_add_nc_u32 v3, 4, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, off offset:1 dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, off offset:2 dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v3, v4, off dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff4_voff4:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -1171,22 +1526,41 @@ define amdgpu_kernel void @soff4_voff4(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff4_voff4:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v0, 4, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_lshl_b32 s0, s0, 2
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff4_voff4:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_mul_u32_u24_e32 v2, 4, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 4
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v0, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_d16_hi_b8 v2, v0, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v2, v1, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff4_voff4:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_mul_u32_u24_e32 v0, 4, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff4_voff4:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -1246,16 +1620,28 @@ define amdgpu_kernel void @soff1_voff1_negative(i32 %soff) {
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_endpgm
;
-; GFX11-SDAG-LABEL: soff1_voff1_negative:
-; GFX11-SDAG: ; %bb.0: ; %bb
-; GFX11-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add3_u32 v0, 0, s0, v0
-; GFX11-SDAG-NEXT: scratch_store_b8 v0, v1, off offset:-1 dlc
-; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: soff1_voff1_negative:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_add3_u32 v1, 0, s0, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT: scratch_store_b8 v1, v0, off offset:-1 dlc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-SDAG-FAKE16-LABEL: soff1_voff1_negative:
+; GFX11-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX11-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX11-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_add3_u32 v0, 0, s0, v0
+; GFX11-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, off offset:-1 dlc
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: soff1_voff1_negative:
; GFX11-GISEL: ; %bb.0: ; %bb
@@ -1268,14 +1654,24 @@ define amdgpu_kernel void @soff1_voff1_negative(i32 %soff) {
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
;
-; GFX12-SDAG-LABEL: soff1_voff1_negative:
-; GFX12-SDAG: ; %bb.0: ; %bb
-; GFX12-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: scratch_store_b8 v0, v1, s0 offset:-1 scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
-; GFX12-SDAG-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: soff1_voff1_negative:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 1
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: scratch_store_b8 v1, v0, s0 offset:-1 scope:SCOPE_SYS
+; GFX12-SDAG-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-SDAG-FAKE16-LABEL: soff1_voff1_negative:
+; GFX12-SDAG-FAKE16: ; %bb.0: ; %bb
+; GFX12-SDAG-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX12-SDAG-FAKE16-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: scratch_store_b8 v0, v1, s0 offset:-1 scope:SCOPE_SYS
+; GFX12-SDAG-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-FAKE16-NEXT: s_endpgm
;
; GFX12-GISEL-LABEL: soff1_voff1_negative:
; GFX12-GISEL: ; %bb.0: ; %bb
@@ -1296,3 +1692,10 @@ bb:
store volatile i8 1, ptr addrspace(5) %p1
ret void
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11-GISEL-FAKE16: {{.*}}
+; GFX11-GISEL-TRUE16: {{.*}}
+; GFX11-SDAG: {{.*}}
+; GFX12-GISEL-FAKE16: {{.*}}
+; GFX12-GISEL-TRUE16: {{.*}}
+; GFX12-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/gfx11-twoaddr-fma.mir b/llvm/test/CodeGen/AMDGPU/gfx11-twoaddr-fma.mir
index 85c6577..ddf3aa2 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx11-twoaddr-fma.mir
+++ b/llvm/test/CodeGen/AMDGPU/gfx11-twoaddr-fma.mir
@@ -1,6 +1,6 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 %s -run-pass twoaddressinstruction -verify-machineinstrs -o - | FileCheck --check-prefixes=GFX11 %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 %s --passes=two-address-instruction -verify-each -o - | FileCheck --check-prefixes=GFX11 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 %s -run-pass twoaddressinstruction -verify-machineinstrs -o - | FileCheck --check-prefixes=GFX11 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 %s --passes=two-address-instruction -verify-each -o - | FileCheck --check-prefixes=GFX11 %s
---
name: test_fmamk_reg_imm_f16
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index 888c1e2..13c9ef4 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -8252,50 +8254,95 @@ define void @global_agent_atomic_fadd_noret_f64__offset12b_neg__amdgpu_no_fine_g
; --------------------------------------------------------------------
define half @global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB44_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8330,45 +8377,85 @@ define half @global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory(ptr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB44_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8591,51 +8678,97 @@ define half @global_agent_atomic_fadd_ret_f16__amdgpu_no_fine_grained_memory(ptr
}
define half @global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB45_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8672,46 +8805,87 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_gra
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB45_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8942,51 +9116,97 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_gra
}
define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB46_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB46_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9024,46 +9244,87 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB46_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB46_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB46_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9294,48 +9555,91 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_gra
}
define void @global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB47_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB47_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9369,43 +9673,81 @@ define void @global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory(p
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB47_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB47_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9621,49 +9963,93 @@ define void @global_agent_atomic_fadd_noret_f16__amdgpu_no_fine_grained_memory(p
}
define void @global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB48_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9699,44 +10085,83 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB48_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB48_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9959,49 +10384,93 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_g
}
define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB49_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10038,44 +10507,83 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB49_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB49_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10298,39 +10806,73 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_g
}
define half @global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB50_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10357,34 +10899,63 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB50_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10560,37 +11131,69 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_
}
define void @global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB51_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10616,32 +11219,59 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_n
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_add_f16_e32 v3, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB51_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10812,52 +11442,99 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_n
}
define half @global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB52_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10894,46 +11571,87 @@ define half @global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_add_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB52_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11166,50 +11884,95 @@ define half @global_system_atomic_fadd_ret_f16__offset12b_pos__amdgpu_no_fine_gr
}
define void @global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB53_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11245,44 +12008,83 @@ define void @global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB53_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11511,59 +12313,114 @@ define void @global_system_atomic_fadd_noret_f16__offset12b_pos__amdgpu_no_fine_
; --------------------------------------------------------------------
define bfloat @global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11607,54 +12464,104 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11904,61 +12811,118 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__amdgpu_no_fine_grained_memory(
}
define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12004,56 +12968,108 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -12311,61 +13327,118 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_
}
define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12412,56 +13485,108 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -12719,57 +13844,110 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_
}
define void @global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -12812,52 +13990,100 @@ define void @global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -13100,59 +14326,114 @@ define void @global_agent_atomic_fadd_noret_bf16__amdgpu_no_fine_grained_memory(
}
define void @global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB58_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -13197,54 +14478,104 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB58_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -13494,59 +14825,114 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_
}
define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB59_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -13592,54 +14978,104 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB59_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -13889,49 +15325,94 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_
}
define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB60_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -13968,44 +15449,84 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB60_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14214,47 +15735,90 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_
}
define void @global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB61_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14290,42 +15854,80 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB61_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14529,62 +16131,120 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_
}
define bfloat @global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB62_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB62_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB62_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB62_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14630,56 +16290,108 @@ define bfloat @global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB62_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB62_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB62_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB62_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14939,60 +16651,116 @@ define bfloat @global_system_atomic_fadd_ret_bf16__offset12b_pos__amdgpu_no_fine
}
define void @global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB63_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB63_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB63_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB63_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15037,54 +16805,104 @@ define void @global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB63_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB63_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB63_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB63_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fadd_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -18560,54 +20378,104 @@ define <2 x bfloat> @global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB78_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB78_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB78_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB78_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB78_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB78_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -18889,54 +20757,104 @@ define <2 x bfloat> @global_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB79_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB79_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB79_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB79_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB79_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB79_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -19220,54 +21138,104 @@ define <2 x bfloat> @global_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB80_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB80_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB80_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB80_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB80_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB80_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -19555,52 +21523,100 @@ define void @global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memor
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB81_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB81_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB81_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB81_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB81_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB81_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -19874,52 +21890,100 @@ define void @global_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fin
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB82_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB82_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB82_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB82_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB82_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB82_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -20196,52 +22260,100 @@ define void @global_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fin
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB83_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB83_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB83_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB83_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB83_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB83_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -20527,54 +22639,104 @@ define <2 x bfloat> @global_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB84_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB84_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB84_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB84_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB84_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB84_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -20861,52 +23023,100 @@ define void @global_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fi
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB85_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB85_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB85_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB85_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB85_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB85_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -21185,54 +23395,104 @@ define <2 x bfloat> @global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memor
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB86_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB86_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB86_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB86_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB86_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB86_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -21514,52 +23774,100 @@ define void @global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory(ptr
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB87_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB87_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB87_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB87_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB87_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB87_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -21833,54 +24141,104 @@ define <2 x bfloat> @global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB88_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB88_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB88_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB88_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB88_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB88_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -22162,52 +24520,100 @@ define void @global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memor
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB89_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB89_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB89_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB89_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB89_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB89_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_v2bf16__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
@@ -22481,54 +24887,104 @@ define <2 x bfloat> @global_agent_atomic_fadd_ret_v2bf16__maybe_remote(ptr addrs
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_ret_v2bf16__maybe_remote:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB90_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB90_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__maybe_remote:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB90_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB90_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_ret_v2bf16__maybe_remote:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB90_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB90_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_ret_v2bf16__maybe_remote:
; GFX10: ; %bb.0:
@@ -22810,52 +25266,100 @@ define void @global_agent_atomic_fadd_noret_v2bf16__maybe_remote(ptr addrspace(1
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fadd_noret_v2bf16__maybe_remote:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB91_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB91_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__maybe_remote:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB91_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB91_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fadd_noret_v2bf16__maybe_remote:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB91_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB91_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fadd_noret_v2bf16__maybe_remote:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
index b4286a0..a24d6c5 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -4443,52 +4445,99 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory__
; --------------------------------------------------------------------
define half @global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4525,47 +4574,89 @@ define half @global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory(ptr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4796,53 +4887,103 @@ define half @global_agent_atomic_fmax_ret_f16__amdgpu_no_fine_grained_memory(ptr
}
define half @global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4881,48 +5022,93 @@ define half @global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_gra
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5161,53 +5347,103 @@ define half @global_agent_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_gra
}
define half @global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB28_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5247,48 +5483,93 @@ define half @global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5527,51 +5808,97 @@ define half @global_agent_atomic_fmax_ret_f16__offset12b_neg__amdgpu_no_fine_gra
}
define void @global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB29_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v3.l, v3.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5607,46 +5934,87 @@ define void @global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory(p
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v3.l, v3.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5870,52 +6238,101 @@ define void @global_agent_atomic_fmax_noret_f16__amdgpu_no_fine_grained_memory(p
}
define void @global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5953,47 +6370,91 @@ define void @global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6224,52 +6685,101 @@ define void @global_agent_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_g
}
define void @global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB31_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6308,47 +6818,91 @@ define void @global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6579,41 +7133,77 @@ define void @global_agent_atomic_fmax_noret_f16__offset12b_neg__amdgpu_no_fine_g
}
define half @global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v4, v4
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB32_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v4, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6642,36 +7232,67 @@ define half @global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v4, v4
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v4, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6855,40 +7476,75 @@ define half @global_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_
}
define void @global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v4, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v4
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB33_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6916,35 +7572,65 @@ define void @global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_n
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_max_f16_e32 v4, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_max_f16_e32 v2, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7123,54 +7809,105 @@ define void @global_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_n
}
define half @global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB34_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7209,48 +7946,93 @@ define half @global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7491,53 +8273,103 @@ define half @global_system_atomic_fmax_ret_f16__offset12b_pos__amdgpu_no_fine_gr
}
define void @global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB35_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7575,47 +8407,91 @@ define void @global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB35_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7852,59 +8728,114 @@ define void @global_system_atomic_fmax_noret_f16__offset12b_pos__amdgpu_no_fine_
; --------------------------------------------------------------------
define bfloat @global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7948,54 +8879,104 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8247,61 +9228,118 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__amdgpu_no_fine_grained_memory(
}
define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8347,56 +9385,108 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8656,61 +9746,118 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_
}
define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8757,56 +9904,108 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9066,57 +10265,110 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_neg__amdgpu_no_fine_
}
define void @global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9159,52 +10411,100 @@ define void @global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9449,59 +10749,114 @@ define void @global_agent_atomic_fmax_noret_bf16__amdgpu_no_fine_grained_memory(
}
define void @global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9546,54 +10901,104 @@ define void @global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9845,59 +11250,114 @@ define void @global_agent_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_
}
define void @global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9943,54 +11403,104 @@ define void @global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10242,49 +11752,94 @@ define void @global_agent_atomic_fmax_noret_bf16__offset12b_neg__amdgpu_no_fine_
}
define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB42_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10321,44 +11876,84 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB42_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10569,47 +12164,90 @@ define bfloat @global_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_
}
define void @global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB43_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10645,42 +12283,80 @@ define void @global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB43_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10886,62 +12562,120 @@ define void @global_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_
}
define bfloat @global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB44_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10987,56 +12721,108 @@ define bfloat @global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB44_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11298,60 +13084,116 @@ define bfloat @global_system_atomic_fmax_ret_bf16__offset12b_pos__amdgpu_no_fine
}
define void @global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB45_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11396,54 +13238,104 @@ define void @global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB45_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmax_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14038,57 +15930,111 @@ define void @global_system_atomic_fmax_noret_v2f16__offset12b_pos__amdgpu_no_fin
; --------------------------------------------------------------------
define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14132,54 +16078,104 @@ define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14438,57 +16434,111 @@ define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__amdgpu_no_fine_grained
}
define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14532,54 +16582,104 @@ define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14840,57 +16940,111 @@ define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_
}
define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14934,54 +17088,104 @@ define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15246,55 +17450,107 @@ define <2 x bfloat> @global_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_
}
define void @global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15337,52 +17593,100 @@ define void @global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memor
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15633,55 +17937,107 @@ define void @global_agent_atomic_fmax_noret_v2bf16__amdgpu_no_fine_grained_memor
}
define void @global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB58_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15724,52 +18080,100 @@ define void @global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fin
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB58_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16023,55 +18427,107 @@ define void @global_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fin
}
define void @global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB59_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16114,52 +18570,100 @@ define void @global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fin
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB59_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16421,58 +18925,113 @@ define void @global_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fin
}
define <2 x bfloat> @global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_max_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB60_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16516,54 +19075,104 @@ define <2 x bfloat> @global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB60_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16826,56 +19435,109 @@ define <2 x bfloat> @global_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu
}
define void @global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_max_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB61_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16918,52 +19580,100 @@ define void @global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fi
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB61_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
index 92a402d..5834d4a 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -4443,52 +4445,99 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory__
; --------------------------------------------------------------------
define half @global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4525,47 +4574,89 @@ define half @global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory(ptr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -4796,53 +4887,103 @@ define half @global_agent_atomic_fmin_ret_f16__amdgpu_no_fine_grained_memory(ptr
}
define half @global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -4881,48 +5022,93 @@ define half @global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_gra
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5161,53 +5347,103 @@ define half @global_agent_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_gra
}
define half @global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB28_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5247,48 +5483,93 @@ define half @global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5527,51 +5808,97 @@ define half @global_agent_atomic_fmin_ret_f16__offset12b_neg__amdgpu_no_fine_gra
}
define void @global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB29_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v3.l, v3.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5607,46 +5934,87 @@ define void @global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory(p
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: v_min_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v3.l, v3.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -5870,52 +6238,101 @@ define void @global_agent_atomic_fmin_noret_f16__amdgpu_no_fine_grained_memory(p
}
define void @global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -5953,47 +6370,91 @@ define void @global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6224,52 +6685,101 @@ define void @global_agent_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_g
}
define void @global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB31_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6308,47 +6818,91 @@ define void @global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_g
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6579,41 +7133,77 @@ define void @global_agent_atomic_fmin_noret_f16__offset12b_neg__amdgpu_no_fine_g
}
define half @global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, v4, v4
-; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB32_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v4, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6642,36 +7232,67 @@ define half @global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, v4, v4
-; GFX11-NEXT: v_min_f16_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v4, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -6855,40 +7476,75 @@ define half @global_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_
}
define void @global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_max_num_f16_e32 v4, v2, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v4
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB33_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.h, v4.l, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, v2.h, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v2, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -6916,35 +7572,65 @@ define void @global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_n
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_max_f16_e32 v4, v2, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_max_f16_e32 v2, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.h, v4.l, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, v2.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7123,54 +7809,105 @@ define void @global_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_n
}
define half @global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB34_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX12-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7209,48 +7946,93 @@ define half @global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_gr
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v5, v5, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v6, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v6
+; GFX11-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7491,53 +8273,103 @@ define half @global_system_atomic_fmin_ret_f16__offset12b_pos__amdgpu_no_fine_gr
}
define void @global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_max_num_f16_e32 v6, v2, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB35_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.l, v0.l, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX12-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v0.h, v5.l, v5.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v5.l, v0.h, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v6, v2, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7575,47 +8407,91 @@ define void @global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_max_f16_e32 v6, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB35_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v0, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, -4, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: global_load_b32 v6, v[3:4], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v1, 0xffff
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v5
+; GFX11-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v1, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v5.l, v5.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v5.l, v0.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -7852,59 +8728,114 @@ define void @global_system_atomic_fmin_noret_f16__offset12b_pos__amdgpu_no_fine_
; --------------------------------------------------------------------
define bfloat @global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -7948,54 +8879,104 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8247,61 +9228,118 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__amdgpu_no_fine_grained_memory(
}
define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8347,56 +9385,108 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -8656,61 +9746,118 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_
}
define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -8757,56 +9904,108 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9066,57 +10265,110 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_neg__amdgpu_no_fine_
}
define void @global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9159,52 +10411,100 @@ define void @global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory(
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9449,59 +10749,114 @@ define void @global_agent_atomic_fmin_noret_bf16__amdgpu_no_fine_grained_memory(
}
define void @global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9546,54 +10901,104 @@ define void @global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -9845,59 +11250,114 @@ define void @global_agent_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_
}
define void @global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -9943,54 +11403,104 @@ define void @global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10242,49 +11752,94 @@ define void @global_agent_atomic_fmin_noret_bf16__offset12b_neg__amdgpu_no_fine_
}
define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB42_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10321,44 +11876,84 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB42_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB42_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB42_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10569,47 +12164,90 @@ define bfloat @global_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_
}
define void @global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB43_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10645,42 +12283,80 @@ define void @global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB43_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB43_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -10886,62 +12562,120 @@ define void @global_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_
}
define bfloat @global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB44_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -10987,56 +12721,108 @@ define bfloat @global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB44_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB44_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB44_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -11298,60 +13084,116 @@ define bfloat @global_system_atomic_fmin_ret_bf16__offset12b_pos__amdgpu_no_fine
}
define void @global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB45_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -11396,54 +13238,104 @@ define void @global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB45_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB45_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB45_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmin_noret_bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14038,57 +15930,111 @@ define void @global_system_atomic_fmin_noret_v2f16__offset12b_pos__amdgpu_no_fin
; --------------------------------------------------------------------
define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14132,54 +16078,104 @@ define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14438,57 +16434,111 @@ define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__amdgpu_no_fine_grained
}
define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14532,54 +16582,104 @@ define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -14840,57 +16940,111 @@ define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_
}
define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -14934,54 +17088,104 @@ define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15246,55 +17450,107 @@ define <2 x bfloat> @global_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_
}
define void @global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15337,52 +17593,100 @@ define void @global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memor
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -15633,55 +17937,107 @@ define void @global_agent_atomic_fmin_noret_v2bf16__amdgpu_no_fine_grained_memor
}
define void @global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB58_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -15724,52 +18080,100 @@ define void @global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fin
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB58_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB58_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16023,55 +18427,107 @@ define void @global_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fin
}
define void @global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB59_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16114,52 +18570,100 @@ define void @global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fin
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB59_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB59_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB59_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16421,58 +18925,113 @@ define void @global_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fin
}
define <2 x bfloat> @global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_min_num_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB60_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16516,54 +19075,104 @@ define <2 x bfloat> @global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB60_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB60_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB60_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
@@ -16826,56 +19435,109 @@ define <2 x bfloat> @global_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu
}
define void @global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_min_num_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB61_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
@@ -16918,52 +19580,100 @@ define void @global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fi
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB61_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB61_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB61_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
index 2f5d9d7..7651853 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -5198,50 +5200,95 @@ define void @global_agent_atomic_fsub_noret_f64__offset12b_neg(ptr addrspace(1)
; --------------------------------------------------------------------
define half @global_agent_atomic_fsub_ret_f16(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB22_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_f16:
; GFX942: ; %bb.0:
@@ -5276,45 +5323,85 @@ define half @global_agent_atomic_fsub_ret_f16(ptr addrspace(1) %ptr, half %val)
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB22_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB22_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_f16:
; GFX10: ; %bb.0:
@@ -5537,51 +5624,97 @@ define half @global_agent_atomic_fsub_ret_f16(ptr addrspace(1) %ptr, half %val)
}
define half @global_agent_atomic_fsub_ret_f16__offset12b_pos(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB23_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -5618,46 +5751,87 @@ define half @global_agent_atomic_fsub_ret_f16__offset12b_pos(ptr addrspace(1) %p
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB23_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB23_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -5888,51 +6062,97 @@ define half @global_agent_atomic_fsub_ret_f16__offset12b_pos(ptr addrspace(1) %p
}
define half @global_agent_atomic_fsub_ret_f16__offset12b_neg(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB24_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -5970,46 +6190,87 @@ define half @global_agent_atomic_fsub_ret_f16__offset12b_neg(ptr addrspace(1) %p
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -6240,48 +6501,91 @@ define half @global_agent_atomic_fsub_ret_f16__offset12b_neg(ptr addrspace(1) %p
}
define void @global_agent_atomic_fsub_noret_f16(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB25_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_f16:
; GFX942: ; %bb.0:
@@ -6315,43 +6619,81 @@ define void @global_agent_atomic_fsub_noret_f16(ptr addrspace(1) %ptr, half %val
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB25_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_f16:
; GFX10: ; %bb.0:
@@ -6567,49 +6909,93 @@ define void @global_agent_atomic_fsub_noret_f16(ptr addrspace(1) %ptr, half %val
}
define void @global_agent_atomic_fsub_noret_f16__offset12b_pos(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -6645,44 +7031,83 @@ define void @global_agent_atomic_fsub_noret_f16__offset12b_pos(ptr addrspace(1)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -6905,49 +7330,93 @@ define void @global_agent_atomic_fsub_noret_f16__offset12b_pos(ptr addrspace(1)
}
define void @global_agent_atomic_fsub_noret_f16__offset12b_neg(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -6984,44 +7453,83 @@ define void @global_agent_atomic_fsub_noret_f16__offset12b_neg(ptr addrspace(1)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_f16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -7244,39 +7752,73 @@ define void @global_agent_atomic_fsub_noret_f16__offset12b_neg(ptr addrspace(1)
}
define half @global_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB28_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
; GFX942: ; %bb.0:
@@ -7303,34 +7845,63 @@ define half @global_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr addrspa
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB28_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_f16__offset12b_pos__align4:
; GFX10: ; %bb.0:
@@ -7506,37 +8077,69 @@ define half @global_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr addrspa
}
define void @global_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off offset:2046
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB29_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
; GFX942: ; %bb.0:
@@ -7562,32 +8165,59 @@ define void @global_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr addrs
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off offset:2046
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_sub_f16_e32 v3, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB29_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v4.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_f16__offset12b__align4_pos:
; GFX10: ; %bb.0:
@@ -7758,52 +8388,99 @@ define void @global_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr addrs
}
define half @global_system_atomic_fsub_ret_f16__offset12b_pos(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -7840,46 +8517,87 @@ define half @global_system_atomic_fsub_ret_f16__offset12b_pos(ptr addrspace(1) %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v4
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_sub_f16_e32 v5, v5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v5.l, v5.l, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fsub_ret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -8112,50 +8830,95 @@ define half @global_system_atomic_fsub_ret_f16__offset12b_pos(ptr addrspace(1) %
}
define void @global_system_atomic_fsub_noret_f16__offset12b_pos(ptr addrspace(1) %ptr, half %val) #0 {
-; GFX12-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB31_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -8191,44 +8954,83 @@ define void @global_system_atomic_fsub_noret_f16__offset12b_pos(ptr addrspace(1)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f16_e32 v3, v3, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB31_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f16_e32 v3.l, v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f16_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fsub_noret_f16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -8457,59 +9259,114 @@ define void @global_system_atomic_fsub_noret_f16__offset12b_pos(ptr addrspace(1)
; --------------------------------------------------------------------
define bfloat @global_agent_atomic_fsub_ret_bf16(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB32_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_bf16:
; GFX942: ; %bb.0:
@@ -8553,54 +9410,104 @@ define bfloat @global_agent_atomic_fsub_ret_bf16(ptr addrspace(1) %ptr, bfloat %
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB32_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB32_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB32_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_bf16:
; GFX10: ; %bb.0:
@@ -8850,61 +9757,118 @@ define bfloat @global_agent_atomic_fsub_ret_bf16(ptr addrspace(1) %ptr, bfloat %
}
define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_pos(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB33_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -8950,56 +9914,108 @@ define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_pos(ptr addrspace(1)
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB33_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB33_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB33_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -9257,61 +10273,118 @@ define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_pos(ptr addrspace(1)
}
define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_neg(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB34_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -9358,56 +10431,108 @@ define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_neg(ptr addrspace(1)
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB34_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -9665,57 +10790,110 @@ define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_neg(ptr addrspace(1)
}
define void @global_agent_atomic_fsub_noret_bf16(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: global_load_b32 v4, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v6, v3
-; GFX12-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB35_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_bf16:
; GFX942: ; %bb.0:
@@ -9758,52 +10936,100 @@ define void @global_agent_atomic_fsub_noret_bf16(ptr addrspace(1) %ptr, bfloat %
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: global_load_b32 v4, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 3, v3
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v6, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB35_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v5, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v6, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB35_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v5, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: global_load_b32 v4, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 3, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v5, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v5, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v5, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v6, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_bf16:
; GFX10: ; %bb.0:
@@ -10046,59 +11272,114 @@ define void @global_agent_atomic_fsub_noret_bf16(ptr addrspace(1) %ptr, bfloat %
}
define void @global_agent_atomic_fsub_noret_bf16__offset12b_pos(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -10143,54 +11424,104 @@ define void @global_agent_atomic_fsub_noret_bf16__offset12b_pos(ptr addrspace(1)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB36_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -10440,59 +11771,114 @@ define void @global_agent_atomic_fsub_noret_bf16__offset12b_pos(ptr addrspace(1)
}
define void @global_agent_atomic_fsub_noret_bf16__offset12b_neg(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB37_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -10538,54 +11924,104 @@ define void @global_agent_atomic_fsub_noret_bf16__offset12b_neg(ptr addrspace(1)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0xfffff800, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB37_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -10835,49 +12271,94 @@ define void @global_agent_atomic_fsub_noret_bf16__offset12b_neg(ptr addrspace(1)
}
define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB38_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
; GFX942: ; %bb.0:
@@ -10914,44 +12395,84 @@ define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr addr
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB38_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[3:4], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB38_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4:
; GFX10: ; %bb.0:
@@ -11160,47 +12681,90 @@ define bfloat @global_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr addr
}
define void @global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
; GFX942: ; %bb.0:
@@ -11236,42 +12800,80 @@ define void @global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr addr
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2046
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2046
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2046 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos:
; GFX10: ; %bb.0:
@@ -11475,62 +13077,120 @@ define void @global_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr addr
}
define bfloat @global_system_atomic_fsub_ret_bf16__offset12b_pos(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v5, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v4, v4
-; GFX12-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX12-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB40_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX12-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -11576,56 +13236,108 @@ define bfloat @global_system_atomic_fsub_ret_bf16__offset12b_pos(ptr addrspace(1
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v3, v5
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v5, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v4, v4
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, v3, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, v3, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, v6, v4, v5
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB40_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v3, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, v3, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v3, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v5, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v4, v4
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, v3, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, v6, v4, v5
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB40_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fsub_ret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -11885,60 +13597,116 @@ define bfloat @global_system_atomic_fsub_ret_bf16__offset12b_pos(ptr addrspace(1
}
define void @global_system_atomic_fsub_noret_bf16__offset12b_pos(ptr addrspace(1) %ptr, bfloat %val) #0 {
-; GFX12-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX12-NEXT: v_not_b32_e32 v5, v5
-; GFX12-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX12-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB41_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX12-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -11983,54 +13751,104 @@ define void @global_system_atomic_fsub_noret_bf16__offset12b_pos(ptr addrspace(1
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 3, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
-; GFX11-NEXT: v_not_b32_e32 v5, v5
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB41_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v4, v2
-; GFX11-NEXT: v_and_or_b32 v2, v3, v5, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB41_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_co_u32 v4, vcc_lo, 0x7fe, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v5, v4, 0xffff
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v5, v5
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v4, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v3, v5, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB41_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fsub_noret_bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -14479,57 +16297,111 @@ define void @global_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr addrspace(
; --------------------------------------------------------------------
define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB50_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_v2bf16:
; GFX942: ; %bb.0:
@@ -14573,54 +16445,104 @@ define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16(ptr addrspace(1) %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB50_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB50_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_v2bf16:
; GFX10: ; %bb.0:
@@ -14879,57 +16801,111 @@ define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16(ptr addrspace(1) %ptr,
}
define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB51_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -14973,54 +16949,104 @@ define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr addr
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB51_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -15281,57 +17307,111 @@ define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr addr
}
define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB52_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -15375,54 +17455,104 @@ define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr addr
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB52_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB52_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_ret_v2bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -15687,55 +17817,107 @@ define <2 x bfloat> @global_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr addr
}
define void @global_agent_atomic_fsub_noret_v2bf16(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB53_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_v2bf16:
; GFX942: ; %bb.0:
@@ -15778,52 +17960,100 @@ define void @global_agent_atomic_fsub_noret_v2bf16(ptr addrspace(1) %ptr, <2 x b
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB53_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB53_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_v2bf16:
; GFX10: ; %bb.0:
@@ -16074,55 +18304,107 @@ define void @global_agent_atomic_fsub_noret_v2bf16(ptr addrspace(1) %ptr, <2 x b
}
define void @global_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB54_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -16165,52 +18447,100 @@ define void @global_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr addrspace(
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB54_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB54_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -16464,55 +18794,107 @@ define void @global_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr addrspace(
}
define void @global_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB55_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
; GFX942: ; %bb.0:
@@ -16555,52 +18937,100 @@ define void @global_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr addrspace(
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB55_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:-2048
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:-2048 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_agent_atomic_fsub_noret_v2bf16__offset12b_neg:
; GFX10: ; %bb.0:
@@ -16862,58 +19292,113 @@ define void @global_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr addrspace(
}
define <2 x bfloat> @global_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX12-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX12-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX12-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX12-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB56_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -16957,54 +19442,104 @@ define <2 x bfloat> @global_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr add
; GFX942-NEXT: v_mov_b32_e32 v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
-; GFX11-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB56_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v5, 0xffff, v3, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v3, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v3, v[0:1], v[5:6], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB56_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fsub_ret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
@@ -17267,56 +19802,109 @@ define <2 x bfloat> @global_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr add
}
define void @global_system_atomic_fsub_noret_v2bf16__offset12b_pos(ptr addrspace(1) %ptr, <2 x bfloat> %val) #0 {
-; GFX12-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX12-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX12-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX12-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX12-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX12-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX12-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX12-NEXT: global_wb scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB57_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX12-TRUE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX12-FAKE16-NEXT: global_wb scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SYS
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX942: ; %bb.0:
@@ -17359,52 +19947,100 @@ define void @global_system_atomic_fsub_noret_v2bf16__offset12b_pos(ptr addrspace
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v3, v[0:1], off offset:2044
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v4
-; GFX11-NEXT: v_sub_f32_e32 v6, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
-; GFX11-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB57_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v6
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl1_inv
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v3, v[0:1], off offset:2044
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v6, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v8, v8, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v7, v9, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v6, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off offset:2044 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl1_inv
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB57_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_system_atomic_fsub_noret_v2bf16__offset12b_pos:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 7179f68..29736b6 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -228,6 +228,8 @@
; GCN-O1-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; GCN-O1-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O1-NEXT: Expand reduction intrinsics
+; GCN-O1-NEXT: AMDGPU Preload Kernel Arguments
+; GCN-O1-NEXT: FunctionPass Manager
; GCN-O1-NEXT: AMDGPU Lower Kernel Arguments
; GCN-O1-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O1-NEXT: CallGraph Construction
@@ -523,6 +525,8 @@
; GCN-O1-OPTS-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O1-OPTS-NEXT: Expand reduction intrinsics
; GCN-O1-OPTS-NEXT: Early CSE
+; GCN-O1-OPTS-NEXT: AMDGPU Preload Kernel Arguments
+; GCN-O1-OPTS-NEXT: FunctionPass Manager
; GCN-O1-OPTS-NEXT: AMDGPU Lower Kernel Arguments
; GCN-O1-OPTS-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O1-OPTS-NEXT: CallGraph Construction
@@ -836,6 +840,8 @@
; GCN-O2-NEXT: Scalarize Masked Memory Intrinsics
; GCN-O2-NEXT: Expand reduction intrinsics
; GCN-O2-NEXT: Early CSE
+; GCN-O2-NEXT: AMDGPU Preload Kernel Arguments
+; GCN-O2-NEXT: FunctionPass Manager
; GCN-O2-NEXT: AMDGPU Lower Kernel Arguments
; GCN-O2-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O2-NEXT: CallGraph Construction
@@ -1164,6 +1170,8 @@
; GCN-O3-NEXT: Lazy Block Frequency Analysis
; GCN-O3-NEXT: Optimization Remark Emitter
; GCN-O3-NEXT: Global Value Numbering
+; GCN-O3-NEXT: AMDGPU Preload Kernel Arguments
+; GCN-O3-NEXT: FunctionPass Manager
; GCN-O3-NEXT: AMDGPU Lower Kernel Arguments
; GCN-O3-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O3-NEXT: CallGraph Construction
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
index 9606c68..4fa4b73 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
@@ -407,7 +407,7 @@ main_body:
; TODO: NSA reassign is very limited and cannot work with VGPR tuples and subregs.
-define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> %tdescr) {
; GFX1013-LABEL: image_bvh_intersect_ray_nsa_reassign:
; GFX1013: ; %bb.0: ; %main_body
; GFX1013-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
@@ -571,7 +571,7 @@ main_body:
ret void
}
-define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(ptr %p_node_ptr, ptr %p_ray, <4 x i32> %tdescr) {
; GFX1013-LABEL: image_bvh_intersect_ray_a16_nsa_reassign:
; GFX1013: ; %bb.0: ; %main_body
; GFX1013-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
@@ -719,7 +719,7 @@ main_body:
ret void
}
-define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4 x i32> %tdescr) {
; GFX1013-LABEL: image_bvh64_intersect_ray_nsa_reassign:
; GFX1013: ; %bb.0: ; %main_body
; GFX1013-NEXT: s_clause 0x1
@@ -880,7 +880,7 @@ main_body:
ret void
}
-define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray, <4 x i32> inreg %tdescr) {
+define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray, <4 x i32> %tdescr) {
; GFX1013-LABEL: image_bvh64_intersect_ray_a16_nsa_reassign:
; GFX1013: ; %bb.0: ; %main_body
; GFX1013-NEXT: s_clause 0x1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll
index fb4c252..947c838 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.waitcnt.out.order.ll
@@ -1,7 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1150 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX1150 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1150 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX1150,GFX1150-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1150 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX1150,GFX1150-FAKE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
define amdgpu_ps <3 x float> @gather_sample(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, <8 x i32> inreg %rsrc2, <4 x i32> inreg %samp2, float %s, float %t) {
; GFX11-LABEL: gather_sample:
@@ -80,35 +83,69 @@ define amdgpu_ps <3 x float> @sample_gather(<8 x i32> inreg %rsrc, <4 x i32> inr
}
define amdgpu_ps <3 x float> @sample_load(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, <8 x i32> inreg %rsrc2, i16 %s.16, i16 %t.16, i16 %fragid) {
-; GFX11-LABEL: sample_load:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX11-NEXT: v_mov_b32_e32 v4, 0
-; GFX11-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: ; return to shader part epilog
+; GFX11-TRUE16-LABEL: sample_load:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
-; GFX1150-LABEL: sample_load:
-; GFX1150: ; %bb.0:
-; GFX1150-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX1150-NEXT: v_mov_b32_e32 v4, 0
-; GFX1150-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
-; GFX1150-NEXT: s_waitcnt vmcnt(0)
-; GFX1150-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX1150-NEXT: s_waitcnt vmcnt(0)
-; GFX1150-NEXT: ; return to shader part epilog
+; GFX11-FAKE16-LABEL: sample_load:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-FAKE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: ; return to shader part epilog
;
-; GFX12-LABEL: sample_load:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX12-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: ; return to shader part epilog
+; GFX1150-TRUE16-LABEL: sample_load:
+; GFX1150-TRUE16: ; %bb.0:
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX1150-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1150-FAKE16-LABEL: sample_load:
+; GFX1150-FAKE16: ; %bb.0:
+; GFX1150-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX1150-FAKE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX1150-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-FAKE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX1150-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX12-TRUE16-LABEL: sample_load:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX12-FAKE16-LABEL: sample_load:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-FAKE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: ; return to shader part epilog
%w = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f32(i32 15, float 0.000000e+00, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
%v = call <4 x float> @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32.i32(i32 1, i16 %s.16, i16 %t.16, i16 %fragid, <8 x i32> %rsrc2, i32 0, i32 0)
@@ -122,35 +159,69 @@ define amdgpu_ps <3 x float> @sample_load(<8 x i32> inreg %rsrc, <4 x i32> inreg
}
define amdgpu_ps <3 x float> @load_sample(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, <8 x i32> inreg %rsrc2, i16 %s.16, i16 %t.16, i16 %fragid) {
-; GFX11-LABEL: load_sample:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX11-NEXT: v_mov_b32_e32 v4, 0
-; GFX11-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: ; return to shader part epilog
+; GFX11-TRUE16-LABEL: load_sample:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
-; GFX1150-LABEL: load_sample:
-; GFX1150: ; %bb.0:
-; GFX1150-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX1150-NEXT: v_mov_b32_e32 v4, 0
-; GFX1150-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
-; GFX1150-NEXT: s_waitcnt vmcnt(0)
-; GFX1150-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX1150-NEXT: s_waitcnt vmcnt(0)
-; GFX1150-NEXT: ; return to shader part epilog
+; GFX11-FAKE16-LABEL: load_sample:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-FAKE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: ; return to shader part epilog
;
-; GFX12-LABEL: load_sample:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX12-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: ; return to shader part epilog
+; GFX1150-TRUE16-LABEL: load_sample:
+; GFX1150-TRUE16: ; %bb.0:
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l
+; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX1150-TRUE16-NEXT: image_msaa_load v[0:3], v[2:3], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1150-FAKE16-LABEL: load_sample:
+; GFX1150-FAKE16: ; %bb.0:
+; GFX1150-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX1150-FAKE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX1150-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-FAKE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX1150-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX1150-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX12-TRUE16-LABEL: load_sample:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-TRUE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX12-FAKE16-LABEL: load_sample:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-FAKE16-NEXT: image_msaa_load v[0:3], [v0, v2], s[12:19] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm a16
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: image_sample_lz v2, [v4, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: ; return to shader part epilog
%v = call <4 x float> @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32.i32(i32 1, i16 %s.16, i16 %t.16, i16 %fragid, <8 x i32> %rsrc2, i32 0, i32 0)
%w = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f32(i32 15, float 0.000000e+00, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
index 8cf7497..da2a3ce 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
@@ -1485,7 +1485,7 @@ define amdgpu_kernel void @test_writelane_imm_f64(ptr addrspace(1) %out, double
ret void
}
-define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 %oldval, ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_i32:
; GFX802-SDAG: ; %bb.0:
; GFX802-SDAG-NEXT: s_load_dword s4, s[8:9], 0x0
@@ -1570,7 +1570,7 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr
ret void
}
-define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 %oldval, ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_i64:
; GFX802-SDAG: ; %bb.0:
; GFX802-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
@@ -1673,7 +1673,7 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr
ret void
}
-define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double inreg %oldval, ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double %oldval, ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_f64:
; GFX802-SDAG: ; %bb.0:
; GFX802-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 2bc2a2a7..ae4acfe 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -1238,48 +1240,91 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; --------------------------------------------------------------------
define half @local_atomic_fadd_ret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_ret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v0, v4
-; GFX12-NEXT: v_add_f16_e32 v2, 4.0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v4, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_ret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_ret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v0, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v4, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_ret_f16:
; GFX942: ; %bb.0:
@@ -1311,42 +1356,79 @@ define half @local_atomic_fadd_ret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v0, v4
-; GFX11-NEXT: v_add_f16_e32 v2, 4.0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v4, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB8_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v4, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_f16:
; GFX10: ; %bb.0:
@@ -1543,50 +1625,95 @@ define half @local_atomic_fadd_ret_f16(ptr addrspace(3) %ptr) nounwind {
}
define half @local_atomic_fadd_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_ret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_add_f16_e32 v3, 4.0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_ret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_ret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_ret_f16__offset:
; GFX942: ; %bb.0:
@@ -1619,44 +1746,83 @@ define half @local_atomic_fadd_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_add_f16_e32 v3, 4.0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB9_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_f16__offset:
; GFX10: ; %bb.0:
@@ -1860,47 +2026,89 @@ define half @local_atomic_fadd_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_noret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB10_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_noret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_noret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_noret_f16:
; GFX942: ; %bb.0:
@@ -1931,41 +2139,77 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB10_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_f16:
; GFX10: ; %bb.0:
@@ -2154,48 +2398,91 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_noret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB11_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_noret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_noret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_noret_f16__offset:
; GFX942: ; %bb.0:
@@ -2227,42 +2514,79 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB11_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_f16__offset:
; GFX10: ; %bb.0:
@@ -2458,39 +2782,73 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
}
define half @local_atomic_fadd_ret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_ret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v1, 4.0, v2
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_ret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_ret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_ret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2515,33 +2873,61 @@ define half @local_atomic_fadd_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v1, 4.0, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -2696,37 +3082,69 @@ define half @local_atomic_fadd_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
}
define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_noret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_noret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_noret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_noret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2750,31 +3168,57 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -2927,57 +3371,110 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; --------------------------------------------------------------------
define bfloat @local_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_ret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v3, 4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_ret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_ret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_ret_bf16:
; GFX942: ; %bb.0:
@@ -3017,51 +3514,98 @@ define bfloat @local_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v3, 4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_bf16:
; GFX10: ; %bb.0:
@@ -3281,59 +3825,114 @@ define bfloat @local_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define bfloat @local_atomic_fadd_ret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_ret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v3, 4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_ret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_ret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_ret_bf16__offset:
; GFX942: ; %bb.0:
@@ -3374,53 +3973,102 @@ define bfloat @local_atomic_fadd_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v3, 4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_bf16__offset:
; GFX10: ; %bb.0:
@@ -3647,56 +4295,108 @@ define bfloat @local_atomic_fadd_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_noret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB16_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_noret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_noret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_noret_bf16:
; GFX942: ; %bb.0:
@@ -3735,50 +4435,96 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB16_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_bf16:
; GFX10: ; %bb.0:
@@ -3990,57 +4736,110 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_noret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB17_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_noret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_noret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_noret_bf16__offset:
; GFX942: ; %bb.0:
@@ -4080,51 +4879,98 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB17_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_bf16__offset:
; GFX10: ; %bb.0:
@@ -4343,48 +5189,92 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define bfloat @local_atomic_fadd_ret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v1, 4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX12-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB18_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4418,42 +5308,80 @@ define bfloat @local_atomic_fadd_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v1, 4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB18_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -4637,46 +5565,88 @@ define bfloat @local_atomic_fadd_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
}
define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX12-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4709,40 +5679,76 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -5829,52 +6835,101 @@ define <2 x bfloat> @local_atomic_fadd_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_v2bf16:
; GFX10: ; %bb.0:
@@ -6137,52 +7192,101 @@ define <2 x bfloat> @local_atomic_fadd_ret_v2bf16__offset(ptr addrspace(3) %ptr,
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_ret_v2bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB25_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_ret_v2bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_ret_v2bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_ret_v2bf16__offset:
; GFX10: ; %bb.0:
@@ -6446,50 +7550,96 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_add_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_v2bf16:
; GFX10: ; %bb.0:
@@ -6744,50 +7894,96 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fadd_noret_v2bf16__ofset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_add_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fadd_noret_v2bf16__ofset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fadd_noret_v2bf16__ofset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fadd_noret_v2bf16__ofset:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
index a0cbc4f..28504da 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -782,49 +784,93 @@ define void @local_atomic_fmax_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; --------------------------------------------------------------------
define half @local_atomic_fmax_ret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_ret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, 4.0, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_f16:
; GFX942: ; %bb.0:
@@ -857,43 +903,81 @@ define half @local_atomic_fmax_ret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, 4.0, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB8_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_f16:
; GFX10: ; %bb.0:
@@ -1094,51 +1178,97 @@ define half @local_atomic_fmax_ret_f16(ptr addrspace(3) %ptr) nounwind {
}
define half @local_atomic_fmax_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_ret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v3, 4.0, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_f16__offset:
; GFX942: ; %bb.0:
@@ -1172,45 +1302,85 @@ define half @local_atomic_fmax_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v3, 4.0, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB9_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_f16__offset:
; GFX10: ; %bb.0:
@@ -1418,48 +1588,91 @@ define half @local_atomic_fmax_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_noret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v4, 4.0, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB10_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_f16:
; GFX942: ; %bb.0:
@@ -1491,42 +1704,79 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB10_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_f16:
; GFX10: ; %bb.0:
@@ -1719,50 +1969,95 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_noret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-NEXT: v_max_num_f16_e32 v4, 4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB11_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_f16__offset:
; GFX942: ; %bb.0:
@@ -1795,44 +2090,83 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB11_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_f16__offset:
; GFX10: ; %bb.0:
@@ -2032,40 +2366,75 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
}
define half @local_atomic_fmax_ret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_ret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v1, v2, v2
-; GFX12-NEXT: v_max_num_f16_e32 v1, 4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2091,34 +2460,63 @@ define half @local_atomic_fmax_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v1, v2, v2
-; GFX11-NEXT: v_max_f16_e32 v1, 4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -2277,39 +2675,73 @@ define half @local_atomic_fmax_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
}
define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_noret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v1, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v2, 4.0, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, 4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, 4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2334,33 +2766,61 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, 4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, 4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -2517,57 +2977,110 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; --------------------------------------------------------------------
define bfloat @local_atomic_fmax_ret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_ret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v3, 4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_bf16:
; GFX942: ; %bb.0:
@@ -2607,51 +3120,98 @@ define bfloat @local_atomic_fmax_ret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v3, 4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_bf16:
; GFX10: ; %bb.0:
@@ -2873,59 +3433,114 @@ define bfloat @local_atomic_fmax_ret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define bfloat @local_atomic_fmax_ret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_ret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v3, 4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_bf16__offset:
; GFX942: ; %bb.0:
@@ -2966,53 +3581,102 @@ define bfloat @local_atomic_fmax_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v3, 4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_bf16__offset:
; GFX10: ; %bb.0:
@@ -3241,56 +3905,108 @@ define bfloat @local_atomic_fmax_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_noret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB16_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_bf16:
; GFX942: ; %bb.0:
@@ -3329,50 +4045,96 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB16_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_bf16:
; GFX10: ; %bb.0:
@@ -3586,57 +4348,110 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_noret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB17_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_bf16__offset:
; GFX942: ; %bb.0:
@@ -3676,51 +4491,98 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB17_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_bf16__offset:
; GFX10: ; %bb.0:
@@ -3941,48 +4803,92 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define bfloat @local_atomic_fmax_ret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX12-NEXT: v_max_num_f32_e32 v1, 4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX12-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB18_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4016,42 +4922,80 @@ define bfloat @local_atomic_fmax_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_max_f32_e32 v1, 4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB18_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -4237,46 +5181,88 @@ define bfloat @local_atomic_fmax_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
}
define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4309,40 +5295,76 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, 4.0, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, 4.0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -5600,57 +6622,111 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; --------------------------------------------------------------------
define <2 x bfloat> @local_atomic_fmax_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmax_ret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v2, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX12-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB24_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_v2bf16:
; GFX942: ; %bb.0:
@@ -5692,52 +6768,101 @@ define <2 x bfloat> @local_atomic_fmax_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_v2bf16:
; GFX10: ; %bb.0:
@@ -5979,57 +7104,111 @@ define <2 x bfloat> @local_atomic_fmax_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
}
define <2 x bfloat> @local_atomic_fmax_ret_v2bf16__offset(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmax_ret_v2bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX12-NEXT: v_max_num_f32_e32 v5, v5, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX12-NEXT: v_max_num_f32_e32 v2, v2, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX12-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB25_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_ret_v2bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, v2, v1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_ret_v2bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, v2, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_ret_v2bf16__offset:
; GFX942: ; %bb.0:
@@ -6071,52 +7250,101 @@ define <2 x bfloat> @local_atomic_fmax_ret_v2bf16__offset(ptr addrspace(3) %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_ret_v2bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB25_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_ret_v2bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_ret_v2bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_ret_v2bf16__offset:
; GFX10: ; %bb.0:
@@ -6359,54 +7587,105 @@ define <2 x bfloat> @local_atomic_fmax_ret_v2bf16__offset(ptr addrspace(3) %ptr,
}
define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmax_noret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-NEXT: v_max_num_f32_e32 v4, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_v2bf16:
; GFX942: ; %bb.0:
@@ -6447,50 +7726,96 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_max_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_v2bf16:
; GFX10: ; %bb.0:
@@ -6724,54 +8049,105 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
}
define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-NEXT: v_max_num_f32_e32 v4, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
; GFX942: ; %bb.0:
@@ -6812,50 +8188,96 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_max_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmax_noret_v2bf16__ofset:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
index d30d76e..48714b7 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -782,49 +784,93 @@ define void @local_atomic_fmin_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; --------------------------------------------------------------------
define half @local_atomic_fmin_ret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_ret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v3, 4.0, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_f16:
; GFX942: ; %bb.0:
@@ -857,43 +903,81 @@ define half @local_atomic_fmin_ret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v3, 4.0, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB8_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_f16:
; GFX10: ; %bb.0:
@@ -1094,51 +1178,97 @@ define half @local_atomic_fmin_ret_f16(ptr addrspace(3) %ptr) nounwind {
}
define half @local_atomic_fmin_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_ret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v3, 4.0, v3
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_f16__offset:
; GFX942: ; %bb.0:
@@ -1172,45 +1302,85 @@ define half @local_atomic_fmin_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v3, 4.0, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB9_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_f16__offset:
; GFX10: ; %bb.0:
@@ -1418,48 +1588,91 @@ define half @local_atomic_fmin_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_noret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v4, 4.0, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB10_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_f16:
; GFX942: ; %bb.0:
@@ -1491,42 +1704,79 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB10_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_f16:
; GFX10: ; %bb.0:
@@ -1719,50 +1969,95 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_noret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-NEXT: v_min_num_f16_e32 v4, 4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB11_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_f16__offset:
; GFX942: ; %bb.0:
@@ -1795,44 +2090,83 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB11_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_f16__offset:
; GFX10: ; %bb.0:
@@ -2032,40 +2366,75 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
}
define half @local_atomic_fmin_ret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_ret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f16_e32 v1, v2, v2
-; GFX12-NEXT: v_min_num_f16_e32 v1, 4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v1.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2091,34 +2460,63 @@ define half @local_atomic_fmin_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f16_e32 v1, v2, v2
-; GFX11-NEXT: v_min_f16_e32 v1, 4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v1.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -2277,39 +2675,73 @@ define half @local_atomic_fmin_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
}
define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_noret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_max_num_f16_e32 v2, v1, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f16_e32 v2, 4.0, v2
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v2.l, 4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, 4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2334,33 +2766,61 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v2.l, 4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, 4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -2517,57 +2977,110 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; --------------------------------------------------------------------
define bfloat @local_atomic_fmin_ret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_ret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v3, 4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_bf16:
; GFX942: ; %bb.0:
@@ -2607,51 +3120,98 @@ define bfloat @local_atomic_fmin_ret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v3, 4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_bf16:
; GFX10: ; %bb.0:
@@ -2873,59 +3433,114 @@ define bfloat @local_atomic_fmin_ret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define bfloat @local_atomic_fmin_ret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_ret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v3, 4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_bf16__offset:
; GFX942: ; %bb.0:
@@ -2966,53 +3581,102 @@ define bfloat @local_atomic_fmin_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v3, 4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_bf16__offset:
; GFX10: ; %bb.0:
@@ -3241,56 +3905,108 @@ define bfloat @local_atomic_fmin_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_noret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB16_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_bf16:
; GFX942: ; %bb.0:
@@ -3329,50 +4045,96 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB16_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_bf16:
; GFX10: ; %bb.0:
@@ -3586,57 +4348,110 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_noret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB17_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_bf16__offset:
; GFX942: ; %bb.0:
@@ -3676,51 +4491,98 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB17_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_bf16__offset:
; GFX10: ; %bb.0:
@@ -3941,48 +4803,92 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define bfloat @local_atomic_fmin_ret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX12-NEXT: v_min_num_f32_e32 v1, 4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX12-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB18_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4016,42 +4922,80 @@ define bfloat @local_atomic_fmin_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_min_f32_e32 v1, 4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB18_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -4237,46 +5181,88 @@ define bfloat @local_atomic_fmin_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
}
define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4309,40 +5295,76 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, 4.0, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, 4.0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -5600,57 +6622,111 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; --------------------------------------------------------------------
define <2 x bfloat> @local_atomic_fmin_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmin_ret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v2, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX12-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB24_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_v2bf16:
; GFX942: ; %bb.0:
@@ -5692,52 +6768,101 @@ define <2 x bfloat> @local_atomic_fmin_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_v2bf16:
; GFX10: ; %bb.0:
@@ -5979,57 +7104,111 @@ define <2 x bfloat> @local_atomic_fmin_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
}
define <2 x bfloat> @local_atomic_fmin_ret_v2bf16__offset(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmin_ret_v2bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX12-NEXT: v_min_num_f32_e32 v5, v5, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX12-NEXT: v_min_num_f32_e32 v2, v2, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX12-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB25_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_ret_v2bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, v2, v1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_ret_v2bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, v2, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_ret_v2bf16__offset:
; GFX942: ; %bb.0:
@@ -6071,52 +7250,101 @@ define <2 x bfloat> @local_atomic_fmin_ret_v2bf16__offset(ptr addrspace(3) %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_ret_v2bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB25_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_ret_v2bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_ret_v2bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_ret_v2bf16__offset:
; GFX10: ; %bb.0:
@@ -6359,54 +7587,105 @@ define <2 x bfloat> @local_atomic_fmin_ret_v2bf16__offset(ptr addrspace(3) %ptr,
}
define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmin_noret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-NEXT: v_min_num_f32_e32 v4, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_v2bf16:
; GFX942: ; %bb.0:
@@ -6447,50 +7726,96 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_min_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_v2bf16:
; GFX10: ; %bb.0:
@@ -6724,54 +8049,105 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
}
define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-NEXT: v_min_num_f32_e32 v4, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
; GFX942: ; %bb.0:
@@ -6812,50 +8188,96 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_min_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fmin_noret_v2bf16__ofset:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
index a8ef8ce..6879a7cf 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
@@ -1700,48 +1702,91 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; --------------------------------------------------------------------
define half @local_atomic_fsub_ret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_ret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, v0, v4
-; GFX12-NEXT: v_add_f16_e32 v2, -4.0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, v0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v2, v4, v3, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v0, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, v4, v3, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_f16:
; GFX942: ; %bb.0:
@@ -1773,42 +1818,79 @@ define half @local_atomic_fsub_ret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, v0, v4
-; GFX11-NEXT: v_add_f16_e32 v2, -4.0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v2, v4, v3, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB8_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, v4, v3, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v1, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_f16:
; GFX10: ; %bb.0:
@@ -2005,50 +2087,95 @@ define half @local_atomic_fsub_ret_f16(ptr addrspace(3) %ptr) nounwind {
}
define half @local_atomic_fsub_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_ret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_add_f16_e32 v3, -4.0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_f16__offset:
; GFX942: ; %bb.0:
@@ -2081,44 +2208,83 @@ define half @local_atomic_fsub_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_add_f16_e32 v3, -4.0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB9_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_f16__offset:
; GFX10: ; %bb.0:
@@ -2322,47 +2488,89 @@ define half @local_atomic_fsub_ret_f16__offset(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_noret_f16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB10_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_f16:
; GFX942: ; %bb.0:
@@ -2393,41 +2601,77 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_f16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB10_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_f16:
; GFX10: ; %bb.0:
@@ -2616,48 +2860,91 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_noret_f16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB11_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_f16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_f16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_f16__offset:
; GFX942: ; %bb.0:
@@ -2689,42 +2976,79 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_f16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB11_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_f16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_f16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_f16__offset:
; GFX10: ; %bb.0:
@@ -2920,39 +3244,73 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
}
define half @local_atomic_fsub_ret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_ret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f16_e32 v1, -4.0, v2
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB12_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -2977,33 +3335,61 @@ define half @local_atomic_fsub_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f16_e32 v1, -4.0, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB12_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -3158,37 +3544,69 @@ define half @local_atomic_fsub_ret_f16__offset__align4(ptr addrspace(3) %ptr) no
}
define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_noret_f16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB13_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_f16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_f16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_f16__offset__align4:
; GFX942: ; %bb.0:
@@ -3212,31 +3630,57 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_f16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB13_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_f16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_f16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_f16__offset__align4:
; GFX10: ; %bb.0:
@@ -3389,57 +3833,110 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; --------------------------------------------------------------------
define bfloat @local_atomic_fsub_ret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_ret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v3, -4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB14_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_bf16:
; GFX942: ; %bb.0:
@@ -3479,51 +3976,98 @@ define bfloat @local_atomic_fsub_ret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v0, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v3, -4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v0, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v0, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_bf16:
; GFX10: ; %bb.0:
@@ -3743,59 +4287,114 @@ define bfloat @local_atomic_fsub_ret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define bfloat @local_atomic_fsub_ret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_ret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v3, -4.0, v3
-; GFX12-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB15_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_bf16__offset:
; GFX942: ; %bb.0:
@@ -3836,53 +4435,102 @@ define bfloat @local_atomic_fsub_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: v_lshrrev_b32_e32 v0, v0, v3
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, v1, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v3, -4.0, v3
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v3, v4, v2, v3
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB15_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, v1, v3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, v1, v3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_bf16__offset:
; GFX10: ; %bb.0:
@@ -4109,56 +4757,108 @@ define bfloat @local_atomic_fsub_ret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_noret_bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: ds_load_b32 v2, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX12-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_not_b32_e32 v3, v3
-; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB16_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_bf16:
; GFX942: ; %bb.0:
@@ -4197,50 +4897,96 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v1, -4, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: ds_load_b32 v2, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
-; GFX11-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_not_b32_e32 v3, v3
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v4, v2, v3, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB16_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_bf16:
; GFX10: ; %bb.0:
@@ -4452,57 +5198,110 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
}
define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_noret_bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX12-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_not_b32_e32 v2, v2
-; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB17_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_bf16__offset:
; GFX942: ; %bb.0:
@@ -4542,51 +5341,98 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v0, -4, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 3, v1
-; GFX11-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_not_b32_e32 v2, v2
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB17_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
+; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0xfffe, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, -4, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v1, 0xffff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_bf16__offset:
; GFX10: ; %bb.0:
@@ -4805,48 +5651,92 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
}
define bfloat @local_atomic_fsub_ret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX12-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB18_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: v_mov_b32_e32 v0, v1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -4880,42 +5770,80 @@ define bfloat @local_atomic_fsub_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: v_mov_b32_e32 v0, v1
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB18_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB18_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -5099,46 +6027,88 @@ define bfloat @local_atomic_fsub_ret_bf16__offset__align4(ptr addrspace(3) %ptr)
}
define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
-; GFX12-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX12-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
; GFX942: ; %bb.0:
@@ -5171,40 +6141,76 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v1, v0 offset:65534
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
+; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v1, v0 offset:65534
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_bf16__offset__align4:
; GFX10: ; %bb.0:
@@ -6388,57 +7394,111 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; --------------------------------------------------------------------
define <2 x bfloat> @local_atomic_fsub_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fsub_ret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v2, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX12-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB24_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_v2bf16:
; GFX942: ; %bb.0:
@@ -6480,52 +7540,101 @@ define <2 x bfloat> @local_atomic_fsub_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_v2bf16:
; GFX10: ; %bb.0:
@@ -6767,57 +7876,111 @@ define <2 x bfloat> @local_atomic_fsub_ret_v2bf16(ptr addrspace(3) %ptr, <2 x bf
}
define <2 x bfloat> @local_atomic_fsub_ret_v2bf16__offset(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fsub_ret_v2bf16__offset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX12-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX12-NEXT: v_sub_f32_e32 v2, v2, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX12-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX12-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB25_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_ret_v2bf16__offset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_ret_v2bf16__offset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_ret_v2bf16__offset:
; GFX942: ; %bb.0:
@@ -6859,52 +8022,101 @@ define <2 x bfloat> @local_atomic_fsub_ret_v2bf16__offset(ptr addrspace(3) %ptr,
; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_ret_v2bf16__offset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v2, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX11-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
-; GFX11-NEXT: v_sub_f32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v2, v2
-; GFX11-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
-; GFX11-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB25_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_ret_v2bf16__offset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0xffff, v2, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_ret_v2bf16__offset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v2, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v7, v9 :: v_dual_lshlrev_b32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v2, v2
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v5, v2, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v4 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_ret_v2bf16__offset:
; GFX10: ; %bb.0:
@@ -7147,54 +8359,105 @@ define <2 x bfloat> @local_atomic_fsub_ret_v2bf16__offset(ptr addrspace(3) %ptr,
}
define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fsub_noret_v2bf16:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v3, v0
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-NEXT: v_sub_f32_e32 v4, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB26_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_v2bf16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_v2bf16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_v2bf16:
; GFX942: ; %bb.0:
@@ -7235,50 +8498,96 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_sub_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB26_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_v2bf16:
; GFX10: ; %bb.0:
@@ -7512,54 +8821,105 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
}
define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x bfloat> %val) {
-; GFX12-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX12-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX12-NEXT: s_mov_b32 s1, 0
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-NEXT: v_sub_f32_e32 v4, v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX12-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX12-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-TRUE16-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
+; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
+; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
; GFX942: ; %bb.0:
@@ -7600,50 +8960,96 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: ds_load_b32 v3, v0 offset:65532
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-NEXT: v_sub_f32_e32 v4, v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
-; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_set_inst_prefetch_distance 0x2
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-TRUE16-NEXT: .p2align 6
+; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_bfi_b32 v4, 0xffff, v4, v5
+; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: buffer_gl0_inv
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v0 offset:65532
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX11-FAKE16-NEXT: .p2align 6
+; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_gl0_inv
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: local_atomic_fsub_noret_v2bf16__ofset:
; GFX10: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-IR-lowering.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-IR-lowering.ll
index 85839bc..830d7cc 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-IR-lowering.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-IR-lowering.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,function(amdgpu-lower-kernel-arguments)' -S < %s | FileCheck -check-prefix=NO-PRELOAD %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=16 -S < %s | FileCheck -check-prefix=PRELOAD %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -S < %s | FileCheck -check-prefix=NO-PRELOAD %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=16 -S < %s | FileCheck -check-prefix=PRELOAD %s
define amdgpu_kernel void @preload_block_count_x(ptr addrspace(1) %out) {
; NO-PRELOAD-LABEL: define amdgpu_kernel void @preload_block_count_x(
@@ -39,7 +39,7 @@ define amdgpu_kernel void @no_free_sgprs_block_count_x(ptr addrspace(1) %out, i5
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-LABEL: define amdgpu_kernel void @no_free_sgprs_block_count_x(
-; PRELOAD-SAME: ptr addrspace(1) inreg [[OUT:%.*]], i512 inreg [[TMP0:%.*]]) #[[ATTR0]] {
+; PRELOAD-SAME: ptr addrspace(1) inreg [[OUT:%.*]], i512 [[TMP0:%.*]]) #[[ATTR0]] {
; PRELOAD-NEXT: [[NO_FREE_SGPRS_BLOCK_COUNT_X_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(328) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
; PRELOAD-NEXT: [[IMP_ARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; PRELOAD-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(4) [[IMP_ARG_PTR]], align 4
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-debug-info.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-debug-info.ll
index 89c9801..1055abe 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-debug-info.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs-debug-info.ll
@@ -1,11 +1,11 @@
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=16 -S < %s 2>&1 \
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=16 -S < %s 2>&1 \
; RUN: | FileCheck --match-full-lines --implicit-check-not='declare' %s
; Confirms we do not leave behind a declaration which references the same
; DISubprogram metadata.
; CHECK: define amdgpu_kernel void @preload_block_count_x{{.*}} !dbg ![[#]] !max_work_group_size ![[#]] {
-; CHECK: declare void @0{{.*}} #[[#]]
+; CHECK-NOT: declare void @0{{.*}} #[[#]]
; CHECK: declare noundef align 4 ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #[[#]]
; CHECK: declare noundef align 4 ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #[[#]]
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
index c26f092..79b531e 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
@@ -873,21 +873,17 @@ define amdgpu_kernel void @preload_block_count_z_workgroup_size_z_remainder_z(pt
;
; GFX90a-LABEL: preload_block_count_z_workgroup_size_z_remainder_z:
; GFX90a: ; %bb.1:
-; GFX90a-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
-; GFX90a-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
-; GFX90a-NEXT: s_load_dword s14, s[4:5], 0x18
+; GFX90a-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: s_branch .LBB22_0
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB22_0:
-; GFX90a-NEXT: s_load_dword s0, s[4:5], 0x1c
+; GFX90a-NEXT: s_lshr_b32 s0, s15, 16
; GFX90a-NEXT: s_and_b32 s1, s14, 0xffff
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-NEXT: v_mov_b32_e32 v0, s12
; GFX90a-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-NEXT: s_lshr_b32 s0, s0, 16
; GFX90a-NEXT: v_mov_b32_e32 v2, s0
; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9]
; GFX90a-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs-IR-lowering.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs-IR-lowering.ll
index 91bfedd..1a445af 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs-IR-lowering.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs-IR-lowering.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-attributor -amdgpu-lower-kernel-arguments -S < %s | FileCheck -check-prefix=NO-PRELOAD %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-attributor -amdgpu-lower-kernel-arguments -amdgpu-kernarg-preload-count=1 -S < %s | FileCheck -check-prefix=PRELOAD-1 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-attributor -amdgpu-lower-kernel-arguments -amdgpu-kernarg-preload-count=3 -S < %s | FileCheck -check-prefix=PRELOAD-3 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-attributor -amdgpu-lower-kernel-arguments -amdgpu-kernarg-preload-count=8 -S < %s | FileCheck -check-prefix=PRELOAD-8 %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -S < %s | FileCheck -check-prefix=NO-PRELOAD %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=1 -S < %s | FileCheck -check-prefix=PRELOAD-1 %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=3 -S < %s | FileCheck -check-prefix=PRELOAD-3 %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes='amdgpu-attributor,amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments)' -amdgpu-kernarg-preload-count=8 -S < %s | FileCheck -check-prefix=PRELOAD-8 %s
define amdgpu_kernel void @test_preload_IR_lowering_kernel_2(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_2
@@ -185,7 +185,7 @@ define amdgpu_kernel void @test_preload_IR_lowering_kernel_8(ptr addrspace(1) %i
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-8-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_8
-; PRELOAD-8-SAME: (ptr addrspace(1) inreg [[IN:%.*]], ptr addrspace(1) inreg [[IN1:%.*]], ptr addrspace(1) inreg [[IN2:%.*]], ptr addrspace(1) inreg [[IN3:%.*]], ptr addrspace(1) inreg [[OUT:%.*]], ptr addrspace(1) inreg [[OUT1:%.*]], ptr addrspace(1) inreg [[OUT2:%.*]], ptr addrspace(1) inreg [[OUT3:%.*]]) #[[ATTR0]] {
+; PRELOAD-8-SAME: (ptr addrspace(1) inreg [[IN:%.*]], ptr addrspace(1) inreg [[IN1:%.*]], ptr addrspace(1) inreg [[IN2:%.*]], ptr addrspace(1) inreg [[IN3:%.*]], ptr addrspace(1) inreg [[OUT:%.*]], ptr addrspace(1) inreg [[OUT1:%.*]], ptr addrspace(1) inreg [[OUT2:%.*]], ptr addrspace(1) [[OUT3:%.*]]) #[[ATTR0]] {
; PRELOAD-8-NEXT: [[TEST_PRELOAD_IR_LOWERING_KERNEL_8_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
; PRELOAD-8-NEXT: [[OUT3_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_8_KERNARG_SEGMENT]], i64 56
; PRELOAD-8-NEXT: [[OUT3_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT3_KERNARG_OFFSET]], align 8, !invariant.load [[META0:![0-9]+]]
@@ -220,14 +220,10 @@ define amdgpu_kernel void @test_preload_IR_lowering_kernel_4_inreg_offset(ptr ad
; NO-PRELOAD-NEXT: [[IN_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[IN_KERNARG_OFFSET]], align 16, !invariant.load [[META0]]
; NO-PRELOAD-NEXT: [[IN1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT]], i64 8
; NO-PRELOAD-NEXT: [[IN1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[IN1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
-; NO-PRELOAD-NEXT: [[OUT_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT]], i64 16
-; NO-PRELOAD-NEXT: [[OUT_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT_KERNARG_OFFSET]], align 16, !invariant.load [[META0]]
-; NO-PRELOAD-NEXT: [[OUT1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT]], i64 24
-; NO-PRELOAD-NEXT: [[OUT1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
; NO-PRELOAD-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[IN_LOAD]], align 4
; NO-PRELOAD-NEXT: [[LOAD1:%.*]] = load i32, ptr addrspace(1) [[IN1_LOAD]], align 4
-; NO-PRELOAD-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT_LOAD]], align 4
-; NO-PRELOAD-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1_LOAD]], align 4
+; NO-PRELOAD-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT]], align 4
+; NO-PRELOAD-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1]], align 4
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_4_inreg_offset
@@ -235,14 +231,10 @@ define amdgpu_kernel void @test_preload_IR_lowering_kernel_4_inreg_offset(ptr ad
; PRELOAD-1-NEXT: [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(32) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
; PRELOAD-1-NEXT: [[IN1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT]], i64 8
; PRELOAD-1-NEXT: [[IN1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[IN1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
-; PRELOAD-1-NEXT: [[OUT_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT]], i64 16
-; PRELOAD-1-NEXT: [[OUT_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT_KERNARG_OFFSET]], align 16, !invariant.load [[META0]]
-; PRELOAD-1-NEXT: [[OUT1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_KERNARG_SEGMENT]], i64 24
-; PRELOAD-1-NEXT: [[OUT1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
; PRELOAD-1-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[IN]], align 4
; PRELOAD-1-NEXT: [[LOAD1:%.*]] = load i32, ptr addrspace(1) [[IN1_LOAD]], align 4
-; PRELOAD-1-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT_LOAD]], align 4
-; PRELOAD-1-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1_LOAD]], align 4
+; PRELOAD-1-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT]], align 4
+; PRELOAD-1-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1]], align 4
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_4_inreg_offset
@@ -270,22 +262,16 @@ define amdgpu_kernel void @test_preload_IR_lowering_kernel_4_inreg_offset(ptr ad
ret void
}
-; Only preload the first sequence of arguments with the inreg attribute. In the NO-PRELOAD case this is just the first argument.
-
define amdgpu_kernel void @test_preload_IR_lowering_kernel_4_inreg_offset_two_sequence(ptr addrspace(1) inreg %in, ptr addrspace(1) %in1, ptr addrspace(1) inreg %out, ptr addrspace(1) inreg %out1) #0 {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_4_inreg_offset_two_sequence
; NO-PRELOAD-SAME: (ptr addrspace(1) inreg [[IN:%.*]], ptr addrspace(1) [[IN1:%.*]], ptr addrspace(1) inreg [[OUT:%.*]], ptr addrspace(1) inreg [[OUT1:%.*]]) #[[ATTR0]] {
; NO-PRELOAD-NEXT: [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(32) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
; NO-PRELOAD-NEXT: [[IN1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT]], i64 8
; NO-PRELOAD-NEXT: [[IN1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[IN1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
-; NO-PRELOAD-NEXT: [[OUT_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT]], i64 16
-; NO-PRELOAD-NEXT: [[OUT_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT_KERNARG_OFFSET]], align 16, !invariant.load [[META0]]
-; NO-PRELOAD-NEXT: [[OUT1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT]], i64 24
-; NO-PRELOAD-NEXT: [[OUT1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
; NO-PRELOAD-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[IN]], align 4
; NO-PRELOAD-NEXT: [[LOAD1:%.*]] = load i32, ptr addrspace(1) [[IN1_LOAD]], align 4
-; NO-PRELOAD-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT_LOAD]], align 4
-; NO-PRELOAD-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1_LOAD]], align 4
+; NO-PRELOAD-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT]], align 4
+; NO-PRELOAD-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1]], align 4
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_4_inreg_offset_two_sequence
@@ -293,14 +279,10 @@ define amdgpu_kernel void @test_preload_IR_lowering_kernel_4_inreg_offset_two_se
; PRELOAD-1-NEXT: [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(32) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
; PRELOAD-1-NEXT: [[IN1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT]], i64 8
; PRELOAD-1-NEXT: [[IN1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[IN1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
-; PRELOAD-1-NEXT: [[OUT_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT]], i64 16
-; PRELOAD-1-NEXT: [[OUT_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT_KERNARG_OFFSET]], align 16, !invariant.load [[META0]]
-; PRELOAD-1-NEXT: [[OUT1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TEST_PRELOAD_IR_LOWERING_KERNEL_4_INREG_OFFSET_TWO_SEQUENCE_KERNARG_SEGMENT]], i64 24
-; PRELOAD-1-NEXT: [[OUT1_LOAD:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[OUT1_KERNARG_OFFSET]], align 8, !invariant.load [[META0]]
; PRELOAD-1-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[IN]], align 4
; PRELOAD-1-NEXT: [[LOAD1:%.*]] = load i32, ptr addrspace(1) [[IN1_LOAD]], align 4
-; PRELOAD-1-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT_LOAD]], align 4
-; PRELOAD-1-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1_LOAD]], align 4
+; PRELOAD-1-NEXT: store i32 [[LOAD]], ptr addrspace(1) [[OUT]], align 4
+; PRELOAD-1-NEXT: store i32 [[LOAD1]], ptr addrspace(1) [[OUT1]], align 4
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_IR_lowering_kernel_4_inreg_offset_two_sequence
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
deleted file mode 100644
index 20edbd6..0000000
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
+++ /dev/null
@@ -1,263 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=amdgpu-attributor -S < %s | FileCheck -check-prefix=NO-PRELOAD %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=1 -passes=amdgpu-attributor -S < %s | FileCheck -check-prefix=PRELOAD-1 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=3 -passes=amdgpu-attributor -S < %s | FileCheck -check-prefix=PRELOAD-3 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=16 -passes=amdgpu-attributor -S < %s | FileCheck -check-prefix=PRELOAD-16 %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=20 -passes=amdgpu-attributor -S < %s | FileCheck -check-prefix=PRELOAD-20 %s
-
-define amdgpu_kernel void @test_preload_hint_kernel_1(ptr %0) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1
-; NO-PRELOAD-SAME: (ptr [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1
-; PRELOAD-1-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1
-; PRELOAD-3-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1
-; PRELOAD-16-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1
-; PRELOAD-20-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_2(i32 %0, i64 %1) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2
-; NO-PRELOAD-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR0]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2
-; PRELOAD-1-SAME: (i32 inreg [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2
-; PRELOAD-3-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2
-; PRELOAD-16-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2
-; PRELOAD-20-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_4(i32 %0, i64 %1, <2 x float> %2, ptr %3) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_4
-; NO-PRELOAD-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]], <2 x float> [[TMP2:%.*]], ptr [[TMP3:%.*]]) #[[ATTR0]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_4
-; PRELOAD-1-SAME: (i32 inreg [[TMP0:%.*]], i64 [[TMP1:%.*]], <2 x float> [[TMP2:%.*]], ptr [[TMP3:%.*]]) #[[ATTR0]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_4
-; PRELOAD-3-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]], <2 x float> inreg [[TMP2:%.*]], ptr [[TMP3:%.*]]) #[[ATTR0]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_4
-; PRELOAD-16-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]], <2 x float> inreg [[TMP2:%.*]], ptr inreg [[TMP3:%.*]]) #[[ATTR0]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_4
-; PRELOAD-20-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]], <2 x float> inreg [[TMP2:%.*]], ptr inreg [[TMP3:%.*]]) #[[ATTR0]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_18(i32 %0, i64 %1, <2 x float> %2, ptr %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16, i32 %17) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_18
-; NO-PRELOAD-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]], <2 x float> [[TMP2:%.*]], ptr [[TMP3:%.*]], i32 [[TMP4:%.*]], i32 [[TMP5:%.*]], i32 [[TMP6:%.*]], i32 [[TMP7:%.*]], i32 [[TMP8:%.*]], i32 [[TMP9:%.*]], i32 [[TMP10:%.*]], i32 [[TMP11:%.*]], i32 [[TMP12:%.*]], i32 [[TMP13:%.*]], i32 [[TMP14:%.*]], i32 [[TMP15:%.*]], i32 [[TMP16:%.*]], i32 [[TMP17:%.*]]) #[[ATTR0]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_18
-; PRELOAD-1-SAME: (i32 inreg [[TMP0:%.*]], i64 [[TMP1:%.*]], <2 x float> [[TMP2:%.*]], ptr [[TMP3:%.*]], i32 [[TMP4:%.*]], i32 [[TMP5:%.*]], i32 [[TMP6:%.*]], i32 [[TMP7:%.*]], i32 [[TMP8:%.*]], i32 [[TMP9:%.*]], i32 [[TMP10:%.*]], i32 [[TMP11:%.*]], i32 [[TMP12:%.*]], i32 [[TMP13:%.*]], i32 [[TMP14:%.*]], i32 [[TMP15:%.*]], i32 [[TMP16:%.*]], i32 [[TMP17:%.*]]) #[[ATTR0]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_18
-; PRELOAD-3-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]], <2 x float> inreg [[TMP2:%.*]], ptr [[TMP3:%.*]], i32 [[TMP4:%.*]], i32 [[TMP5:%.*]], i32 [[TMP6:%.*]], i32 [[TMP7:%.*]], i32 [[TMP8:%.*]], i32 [[TMP9:%.*]], i32 [[TMP10:%.*]], i32 [[TMP11:%.*]], i32 [[TMP12:%.*]], i32 [[TMP13:%.*]], i32 [[TMP14:%.*]], i32 [[TMP15:%.*]], i32 [[TMP16:%.*]], i32 [[TMP17:%.*]]) #[[ATTR0]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_18
-; PRELOAD-16-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]], <2 x float> inreg [[TMP2:%.*]], ptr inreg [[TMP3:%.*]], i32 inreg [[TMP4:%.*]], i32 inreg [[TMP5:%.*]], i32 inreg [[TMP6:%.*]], i32 inreg [[TMP7:%.*]], i32 inreg [[TMP8:%.*]], i32 inreg [[TMP9:%.*]], i32 inreg [[TMP10:%.*]], i32 inreg [[TMP11:%.*]], i32 inreg [[TMP12:%.*]], i32 inreg [[TMP13:%.*]], i32 inreg [[TMP14:%.*]], i32 inreg [[TMP15:%.*]], i32 [[TMP16:%.*]], i32 [[TMP17:%.*]]) #[[ATTR0]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_18
-; PRELOAD-20-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]], <2 x float> inreg [[TMP2:%.*]], ptr inreg [[TMP3:%.*]], i32 inreg [[TMP4:%.*]], i32 inreg [[TMP5:%.*]], i32 inreg [[TMP6:%.*]], i32 inreg [[TMP7:%.*]], i32 inreg [[TMP8:%.*]], i32 inreg [[TMP9:%.*]], i32 inreg [[TMP10:%.*]], i32 inreg [[TMP11:%.*]], i32 inreg [[TMP12:%.*]], i32 inreg [[TMP13:%.*]], i32 inreg [[TMP14:%.*]], i32 inreg [[TMP15:%.*]], i32 [[TMP16:%.*]], i32 [[TMP17:%.*]]) #[[ATTR0]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define void @test_preload_hint_non_kernel_2(i32 %0, i64 %1) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_non_kernel_2
-; NO-PRELOAD-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_non_kernel_2
-; PRELOAD-1-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_non_kernel_2
-; PRELOAD-3-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_non_kernel_2
-; PRELOAD-16-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_non_kernel_2
-; PRELOAD-20-SAME: (i32 [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_1_call_func(ptr %0) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_func
-; NO-PRELOAD-SAME: (ptr [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] {
-; NO-PRELOAD-NEXT: call void @func(ptr [[TMP0]])
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_func
-; PRELOAD-1-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] {
-; PRELOAD-1-NEXT: call void @func(ptr [[TMP0]])
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_func
-; PRELOAD-3-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] {
-; PRELOAD-3-NEXT: call void @func(ptr [[TMP0]])
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_func
-; PRELOAD-16-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] {
-; PRELOAD-16-NEXT: call void @func(ptr [[TMP0]])
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_func
-; PRELOAD-20-SAME: (ptr inreg [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] {
-; PRELOAD-20-NEXT: call void @func(ptr [[TMP0]])
-; PRELOAD-20-NEXT: ret void
-;
- call void @func(ptr %0)
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_1_call_intrinsic(i16 %0) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
-; NO-PRELOAD-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
-; PRELOAD-1-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
-; PRELOAD-3-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
-; PRELOAD-16-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
-; PRELOAD-20-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
-; PRELOAD-20-NEXT: ret void
-;
- call void @llvm.amdgcn.set.prio(i16 %0)
- ret void
-}
-
-define spir_kernel void @test_preload_hint_kernel_1_spir_cc(ptr %0) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_spir_cc
-; NO-PRELOAD-SAME: (ptr [[TMP0:%.*]]) #[[ATTR0]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_spir_cc
-; PRELOAD-1-SAME: (ptr [[TMP0:%.*]]) #[[ATTR0]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_spir_cc
-; PRELOAD-3-SAME: (ptr [[TMP0:%.*]]) #[[ATTR0]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_spir_cc
-; PRELOAD-16-SAME: (ptr [[TMP0:%.*]]) #[[ATTR0]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_spir_cc
-; PRELOAD-20-SAME: (ptr [[TMP0:%.*]]) #[[ATTR0]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_2_preexisting(i32 inreg %0, i64 %1) #0 {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2_preexisting
-; NO-PRELOAD-SAME: (i32 inreg [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR0]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2_preexisting
-; PRELOAD-1-SAME: (i32 inreg [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2_preexisting
-; PRELOAD-3-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2_preexisting
-; PRELOAD-16-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_2_preexisting
-; PRELOAD-20-SAME: (i32 inreg [[TMP0:%.*]], i64 inreg [[TMP1:%.*]]) #[[ATTR0]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-define amdgpu_kernel void @test_preload_hint_kernel_incompatible_attributes(ptr addrspace(4) byref(i32) %0, ptr nest %1) {
-; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
-; NO-PRELOAD-NEXT: ret void
-;
-; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
-; PRELOAD-1-NEXT: ret void
-;
-; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
-; PRELOAD-3-NEXT: ret void
-;
-; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
-; PRELOAD-16-NEXT: ret void
-;
-; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
-; PRELOAD-20-NEXT: ret void
-;
- ret void
-}
-
-declare void @func(ptr) #0
-declare void @llvm.amdgcn.set.prio(i16)
-
-attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index 7ae0c11..41fe0d4 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -927,18 +927,17 @@ define amdgpu_kernel void @half_v7bfloat_kernel_preload_arg(ptr addrspace(1) inr
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB23_0:
-; GFX90a-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
-; GFX90a-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x20
+; GFX90a-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x20
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-NEXT: global_store_short v3, v0, s[8:9]
+; GFX90a-NEXT: v_mov_b32_e32 v0, s15
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-NEXT: v_mov_b32_e32 v0, s3
-; GFX90a-NEXT: global_store_short v3, v0, s[6:7] offset:12
-; GFX90a-NEXT: v_mov_b32_e32 v2, s2
-; GFX90a-NEXT: v_mov_b32_e32 v0, s0
-; GFX90a-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-NEXT: global_store_short v3, v0, s[0:1] offset:12
+; GFX90a-NEXT: v_mov_b32_e32 v2, s14
+; GFX90a-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1]
; GFX90a-NEXT: s_endpgm
store half %in, ptr addrspace(1) %out
store <7 x bfloat> %in2, ptr addrspace(1) %out2
@@ -1172,16 +1171,15 @@ define amdgpu_kernel void @i16_v3i32_kernel_preload_arg(ptr addrspace(1) inreg %
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB29_0:
-; GFX90a-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x20
; GFX90a-NEXT: v_mov_b32_e32 v3, 0
-; GFX90a-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x20
; GFX90a-NEXT: v_mov_b32_e32 v4, s10
-; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-NEXT: v_mov_b32_e32 v0, s0
-; GFX90a-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NEXT: v_mov_b32_e32 v0, s12
+; GFX90a-NEXT: v_mov_b32_e32 v1, s13
+; GFX90a-NEXT: v_mov_b32_e32 v2, s14
; GFX90a-NEXT: global_store_short v3, v4, s[8:9]
-; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[4:5]
+; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1]
; GFX90a-NEXT: s_endpgm
store i16 %in, ptr addrspace(1) %out
store <3 x i32> %in2, ptr addrspace(1) %out2
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-vgpr-to-sgpr-return.ll b/llvm/test/CodeGen/AMDGPU/uniform-vgpr-to-sgpr-return.ll
index 5476c26..14b9179 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-vgpr-to-sgpr-return.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-vgpr-to-sgpr-return.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck %s -check-prefixes=GFX11
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
define amdgpu_ps i32 @uniform_v_to_s_i32(float inreg %a, float inreg %b) {
; GFX11-LABEL: uniform_v_to_s_i32:
@@ -104,14 +105,23 @@ define amdgpu_ps <2 x i16> @uniform_v_to_s_2_i16(float inreg %a, float inreg %b)
}
define amdgpu_ps i16 @uniform_v_to_s_i16(half inreg %a, half inreg %b) {
-; GFX11-LABEL: uniform_v_to_s_i16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: v_max_f16_e64 v0, s0, s1
-; GFX11-NEXT: v_cmp_o_f16_e64 vcc_lo, s0, s1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7e00, v0, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s0, v0
-; GFX11-NEXT: ; return to shader part epilog
+; GFX11-TRUE16-LABEL: uniform_v_to_s_i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s2, s0, s1
+; GFX11-TRUE16-NEXT: v_max_f16_e64 v0.l, s0, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX11-FAKE16-LABEL: uniform_v_to_s_i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: v_max_f16_e64 v0, s0, s1
+; GFX11-FAKE16-NEXT: v_cmp_o_f16_e64 vcc_lo, s0, s1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x7e00, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-FAKE16-NEXT: ; return to shader part epilog
%max = call half @llvm.maximum.f16(half %a, half %b)
%cast = bitcast half %max to i16
ret i16 %cast
diff --git a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
index 40a4d4a..86fc0ac 100644
--- a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
@@ -1,13 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG-VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG-GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1101 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,SDAG-GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1101 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,SDAG-GFX11,SDAG-GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1101 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,SDAG-GFX11,SDAG-GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG-GFX12,SDAG-GFX12-TRUE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG-GFX12,SDAG-GFX12-FAKE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GISEL-VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GISEL-GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1101 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX11,GISEL-GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1101 -mattr=+real-true16 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX11,GISEL-GFX11,GISEL-GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1101 -mattr=-real-true16 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX11,GISEL-GFX11,GISEL-GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GISEL-GFX12,GISEL-GFX12-TRUE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GISEL-GFX12,GISEL-GFX12-FAKE16 %s
@@ -41,14 +43,21 @@ define <2 x i16> @basic_smax_smin(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; SDAG-GFX11-LABEL: basic_smax_smin:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; SDAG-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -95,15 +104,22 @@ define <2 x i16> @basic_smax_smin(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-GFX11-LABEL: basic_smax_smin:
-; GISEL-GFX11: ; %bb.0:
-; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; GISEL-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GISEL-GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-TRUE16-LABEL: basic_smax_smin:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smax_smin:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-GFX12-TRUE16-LABEL: basic_smax_smin:
; GISEL-GFX12-TRUE16: ; %bb.0:
@@ -169,18 +185,31 @@ define amdgpu_kernel void @basic_smax_smin_sgpr(ptr addrspace(1) %out, i32 inreg
; SDAG-GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; SDAG-GFX9-NEXT: s_endpgm
;
-; SDAG-GFX11-LABEL: basic_smax_smin_sgpr:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX11-NEXT: v_mov_b32_e32 v2, 0
-; SDAG-GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_med3_i16 v0, s2, 0, 0xff
-; SDAG-GFX11-NEXT: v_med3_i16 v1, s3, 0, 0xff
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SDAG-GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; SDAG-GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
-; SDAG-GFX11-NEXT: s_endpgm
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_sgpr:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, s2, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.l, s3, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SDAG-GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; SDAG-GFX11-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; SDAG-GFX11-TRUE16-NEXT: s_endpgm
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_sgpr:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; SDAG-GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, s2, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v1, s3, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SDAG-GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; SDAG-GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; SDAG-GFX11-FAKE16-NEXT: s_endpgm
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_sgpr:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -320,14 +349,21 @@ define <2 x i16> @basic_smin_smax(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; SDAG-GFX11-LABEL: basic_smin_smax:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; SDAG-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smin_smax:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smin_smax:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smin_smax:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -374,15 +410,22 @@ define <2 x i16> @basic_smin_smax(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-GFX11-LABEL: basic_smin_smax:
-; GISEL-GFX11: ; %bb.0:
-; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; GISEL-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GISEL-GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-TRUE16-LABEL: basic_smin_smax:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smin_smax:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-GFX12-TRUE16-LABEL: basic_smin_smax:
; GISEL-GFX12-TRUE16: ; %bb.0:
@@ -440,14 +483,21 @@ define <2 x i16> @basic_smin_smax_combined(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; SDAG-GFX11-LABEL: basic_smin_smax_combined:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; SDAG-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smin_smax_combined:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smin_smax_combined:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smin_smax_combined:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -494,15 +544,22 @@ define <2 x i16> @basic_smin_smax_combined(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-GFX11-LABEL: basic_smin_smax_combined:
-; GISEL-GFX11: ; %bb.0:
-; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; GISEL-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GISEL-GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-TRUE16-LABEL: basic_smin_smax_combined:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smin_smax_combined:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-GFX12-TRUE16-LABEL: basic_smin_smax_combined:
; GISEL-GFX12-TRUE16: ; %bb.0:
@@ -886,15 +943,25 @@ define i16 @basic_smax_smin_bit_or(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: basic_smax_smin_bit_or:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_bit_or:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; SDAG-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_bit_or:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; SDAG-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_bit_or:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -945,6 +1012,26 @@ define i16 @basic_smax_smin_bit_or(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
+; GISEL-GFX11-TRUE16-LABEL: basic_smax_smin_bit_or:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; GISEL-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smax_smin_bit_or:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; GISEL-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GISEL-GFX12-TRUE16-LABEL: basic_smax_smin_bit_or:
; GISEL-GFX12-TRUE16: ; %bb.0:
; GISEL-GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -1001,15 +1088,25 @@ define i16 @basic_umax_umin_bit_or(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: basic_umax_umin_bit_or:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_min_u16 v1, 0xff, v1
-; GFX11-NEXT: v_min_u16 v0, 0xff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_umax_umin_bit_or:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_min_u16 v0.h, 0xff, v1.l
+; SDAG-GFX11-TRUE16-NEXT: v_min_u16 v0.l, 0xff, v0.l
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; SDAG-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_umax_umin_bit_or:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_min_u16 v1, 0xff, v1
+; SDAG-GFX11-FAKE16-NEXT: v_min_u16 v0, 0xff, v0
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; SDAG-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_umax_umin_bit_or:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -1057,6 +1154,26 @@ define i16 @basic_umax_umin_bit_or(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
+; GISEL-GFX11-TRUE16-LABEL: basic_umax_umin_bit_or:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_min_u16 v0.h, 0xff, v1.l
+; GISEL-GFX11-TRUE16-NEXT: v_min_u16 v0.l, 0xff, v0.l
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; GISEL-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_umax_umin_bit_or:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_min_u16 v1, 0xff, v1
+; GISEL-GFX11-FAKE16-NEXT: v_min_u16 v0, 0xff, v0
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; GISEL-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GISEL-GFX12-TRUE16-LABEL: basic_umax_umin_bit_or:
; GISEL-GFX12-TRUE16: ; %bb.0:
; GISEL-GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -1116,17 +1233,29 @@ define i16 @basic_smax_smin_vec_cast(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; SDAG-GFX11-LABEL: basic_smax_smin_vec_cast:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; SDAG-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; SDAG-GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; SDAG-GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_vec_cast:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; SDAG-GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_vec_cast:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; SDAG-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_vec_cast:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -1181,15 +1310,25 @@ define i16 @basic_smax_smin_vec_cast(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-GFX11-LABEL: basic_smax_smin_vec_cast:
-; GISEL-GFX11: ; %bb.0:
-; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT: v_med3_i16 v1, v1, 0, 0xff
-; GISEL-GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; GISEL-GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-TRUE16-LABEL: basic_smax_smin_vec_cast:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.h, v1.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; GISEL-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smax_smin_vec_cast:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v1, v1, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; GISEL-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-GFX12-TRUE16-LABEL: basic_smax_smin_vec_cast:
; GISEL-GFX12-TRUE16: ; %bb.0:
@@ -1250,15 +1389,25 @@ define i16 @basic_smax_smin_bit_shl(i16 %src0, i16 %src1) {
; SDAG-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: basic_smax_smin_bit_shl:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_i16 v1, v1, 0
-; GFX11-NEXT: v_med3_i16 v0, v0, 0, 0xff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_bit_shl:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_max_i16 v0.h, v1.l, 0
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; SDAG-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_bit_shl:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_max_i16 v1, v1, 0
+; SDAG-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; SDAG-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_bit_shl:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -1308,6 +1457,26 @@ define i16 @basic_smax_smin_bit_shl(i16 %src0, i16 %src1) {
; GISEL-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
+; GISEL-GFX11-TRUE16-LABEL: basic_smax_smin_bit_shl:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_max_i16 v0.h, v1.l, 0
+; GISEL-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, v0.l, 0, 0xff
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
+; GISEL-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smax_smin_bit_shl:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_max_i16 v1, v1, 0
+; GISEL-GFX11-FAKE16-NEXT: v_med3_i16 v0, v0, 0, 0xff
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; GISEL-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GISEL-GFX12-TRUE16-LABEL: basic_smax_smin_bit_shl:
; GISEL-GFX12-TRUE16: ; %bb.0:
; GISEL-GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -1367,17 +1536,28 @@ define i16 @basic_smax_smin_vec_input(<2 x i16> %src) {
; SDAG-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; SDAG-GFX11-LABEL: basic_smax_smin_vec_input:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_pk_min_i16 v0, 0xff, v0 op_sel_hi:[0,1]
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_pk_max_i16 v0, v0, 0
-; SDAG-GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; SDAG-GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_vec_input:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_pk_min_i16 v0, 0xff, v0 op_sel_hi:[0,1]
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_pk_max_i16 v1, v0, 0
+; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v1.h
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_vec_input:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_pk_min_i16 v0, 0xff, v0 op_sel_hi:[0,1]
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_pk_max_i16 v0, v0, 0
+; SDAG-GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; SDAG-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_vec_input:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -1434,20 +1614,34 @@ define i16 @basic_smax_smin_vec_input(<2 x i16> %src) {
; GISEL-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-GFX11-LABEL: basic_smax_smin_vec_input:
-; GISEL-GFX11: ; %bb.0:
-; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT: v_pk_min_i16 v0, 0xff00ff, v0
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_pk_max_i16 v0, 0, v0
-; GISEL-GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GISEL-GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GISEL-GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-TRUE16-LABEL: basic_smax_smin_vec_input:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_pk_min_i16 v0, 0xff00ff, v0
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_pk_max_i16 v1, 0, v0
+; GISEL-GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v1.h
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GISEL-GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GISEL-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v0.l
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.h, v0.l
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smax_smin_vec_input:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_pk_min_i16 v0, 0xff00ff, v0
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_pk_max_i16 v0, 0, v0
+; GISEL-GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GISEL-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-GFX12-TRUE16-LABEL: basic_smax_smin_vec_input:
; GISEL-GFX12-TRUE16: ; %bb.0:
@@ -1516,17 +1710,28 @@ define i16 @basic_smax_smin_vec_input_rev(<2 x i16> %src) {
; SDAG-GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; SDAG-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; SDAG-GFX11-LABEL: basic_smax_smin_vec_input_rev:
-; SDAG-GFX11: ; %bb.0:
-; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-GFX11-NEXT: v_pk_max_i16 v0, v0, 0
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_pk_min_i16 v0, 0xff, v0 op_sel_hi:[0,1]
-; SDAG-GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; SDAG-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; SDAG-GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_vec_input_rev:
+; SDAG-GFX11-TRUE16: ; %bb.0:
+; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-TRUE16-NEXT: v_pk_max_i16 v0, v0, 0
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_pk_min_i16 v1, 0xff, v0 op_sel_hi:[0,1]
+; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v1.h
+; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l
+; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_vec_input_rev:
+; SDAG-GFX11-FAKE16: ; %bb.0:
+; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-FAKE16-NEXT: v_pk_max_i16 v0, v0, 0
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_pk_min_i16 v0, 0xff, v0 op_sel_hi:[0,1]
+; SDAG-GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; SDAG-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_vec_input_rev:
; SDAG-GFX12-TRUE16: ; %bb.0:
@@ -1582,20 +1787,31 @@ define i16 @basic_smax_smin_vec_input_rev(<2 x i16> %src) {
; GISEL-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-GFX11-LABEL: basic_smax_smin_vec_input_rev:
-; GISEL-GFX11: ; %bb.0:
-; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT: v_pk_max_i16 v0, 0, v0
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_pk_min_i16 v0, 0xff00ff, v0
-; GISEL-GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GISEL-GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GISEL-GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
-; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GISEL-GFX11-NEXT: v_or_b32_e32 v0, v0, v1
-; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-TRUE16-LABEL: basic_smax_smin_vec_input_rev:
+; GISEL-GFX11-TRUE16: ; %bb.0:
+; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-TRUE16-NEXT: v_pk_max_i16 v0, 0, v0
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_pk_min_i16 v1, 0xff00ff, v0
+; GISEL-GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v1.h
+; GISEL-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l
+; GISEL-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-FAKE16-LABEL: basic_smax_smin_vec_input_rev:
+; GISEL-GFX11-FAKE16: ; %bb.0:
+; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-FAKE16-NEXT: v_pk_max_i16 v0, 0, v0
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_pk_min_i16 v0, 0xff00ff, v0
+; GISEL-GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GISEL-GFX11-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
+; GISEL-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-GFX12-TRUE16-LABEL: basic_smax_smin_vec_input_rev:
; GISEL-GFX12-TRUE16: ; %bb.0:
@@ -1638,3 +1854,5 @@ define i16 @basic_smax_smin_vec_input_rev(<2 x i16> %src) {
ret i16 %cast
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index 68010fc..09d19be 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -301,7 +301,7 @@ define hidden i32 @called(i32 %a) noinline {
ret i32 %sub
}
-define amdgpu_kernel void @call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
+define amdgpu_kernel void @call(ptr addrspace(8) %tmp14, i32 %arg) {
; GFX9-O0-LABEL: call:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0
@@ -533,7 +533,7 @@ define i64 @called_i64(i64 %a) noinline {
ret i64 %sub
}
-define amdgpu_kernel void @call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %arg) {
+define amdgpu_kernel void @call_i64(ptr addrspace(8) %tmp14, i64 %arg) {
; GFX9-O0-LABEL: call_i64:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0
@@ -1153,7 +1153,7 @@ define hidden i32 @strict_wwm_called(i32 %a) noinline {
ret i32 %sub
}
-define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
+define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) %tmp14, i32 %arg) {
; GFX9-O0-LABEL: strict_wwm_call:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0
@@ -1385,7 +1385,7 @@ define i64 @strict_wwm_called_i64(i64 %a) noinline {
ret i64 %sub
}
-define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %arg) {
+define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) %tmp14, i64 %arg) {
; GFX9-O0-LABEL: strict_wwm_call_i64:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 49e05f9..ba4cbf9 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -104,6 +104,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcisync %s -o - | FileCheck --check-prefix=RV32XQCISYNC %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcisls %s -o - | FileCheck --check-prefix=RV32XQCISLS %s
; RUN: llc -mtriple=riscv32 -mattr=+xandesperf %s -o - | FileCheck --check-prefix=RV32XANDESPERF %s
+; RUN: llc -mtriple=riscv32 -mattr=+xandesvpackfph %s -o - | FileCheck --check-prefix=RV32XANDESVPACKFPH %s
; RUN: llc -mtriple=riscv32 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
; RUN: llc -mtriple=riscv32 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCA %s
@@ -254,6 +255,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+xtheadsync %s -o - | FileCheck --check-prefix=RV64XTHEADSYNC %s
; RUN: llc -mtriple=riscv64 -mattr=+xtheadvdot %s -o - | FileCheck --check-prefixes=CHECK,RV64XTHEADVDOT %s
; RUN: llc -mtriple=riscv64 -mattr=+xandesperf %s -o - | FileCheck --check-prefix=RV64XANDESPERF %s
+; RUN: llc -mtriple=riscv64 -mattr=+xandesvpackfph %s -o - | FileCheck --check-prefix=RV64XANDESVPACKFPH %s
; RUN: llc -mtriple=riscv64 -mattr=+za64rs %s -o - | FileCheck --check-prefixes=CHECK,RV64ZA64RS %s
; RUN: llc -mtriple=riscv64 -mattr=+za128rs %s -o - | FileCheck --check-prefixes=CHECK,RV64ZA128RS %s
; RUN: llc -mtriple=riscv64 -mattr=+zama16b %s -o - | FileCheck --check-prefixes=CHECK,RV64ZAMA16B %s
@@ -447,6 +449,7 @@
; RV32XQCISYNC: attribute 5, "rv32i2p1_zca1p0_xqcisync0p2"
; RV32XQCISLS: .attribute 5, "rv32i2p1_xqcisls0p2"
; RV32XANDESPERF: .attribute 5, "rv32i2p1_xandesperf5p0"
+; RV32XANDESVPACKFPH: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvfhmin1p0_zvl32b1p0_xandesvpackfph5p0"
; RV32ZAAMO: .attribute 5, "rv32i2p1_zaamo1p0"
; RV32ZALRSC: .attribute 5, "rv32i2p1_zalrsc1p0"
; RV32ZCA: .attribute 5, "rv32i2p1_zca1p0"
@@ -598,6 +601,7 @@
; RV64XTHEADSYNC: .attribute 5, "rv64i2p1_xtheadsync1p0"
; RV64XTHEADVDOT: .attribute 5, "rv64i2p1_f2p2_d2p2_v1p0_zicsr2p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0_xtheadvdot1p0"
; RV64XANDESPERF: .attribute 5, "rv64i2p1_xandesperf5p0"
+; RV64XANDESVPACKFPH: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvfhmin1p0_zvl32b1p0_xandesvpackfph5p0"
; RV64ZTSO: .attribute 5, "rv64i2p1_ztso1p0"
; RV64ZAAMO: .attribute 5, "rv64i2p1_zaamo1p0"
; RV64ZALRSC: .attribute 5, "rv64i2p1_zalrsc1p0"
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 5f322dc..cdbb6e6 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -171,6 +171,7 @@
; CHECK-NEXT: ventana-veyron - Ventana Veyron-Series processors.
; CHECK-NEXT: vxrm-pipeline-flush - VXRM writes causes pipeline flush.
; CHECK-NEXT: xandesperf - 'XAndesPerf' (Andes Performance Extension).
+; CHECK-NEXT: xandesvpackfph - 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension).
; CHECK-NEXT: xcvalu - 'XCValu' (CORE-V ALU Operations).
; CHECK-NEXT: xcvbi - 'XCVbi' (CORE-V Immediate Branching).
; CHECK-NEXT: xcvbitmanip - 'XCVbitmanip' (CORE-V Bit Manipulation).
diff --git a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll
index 134c2b8..c489bc3 100644
--- a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll
+++ b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll
@@ -2,7 +2,7 @@
; RUN: opt -mtriple=riscv64-unknown-linux-gnu -mattr=+v -vector-library=sleefgnuabi -replace-with-veclib -S < %s | FileCheck %s
;.
-; CHECK: @llvm.compiler.used = appending global [21 x ptr] [ptr @Sleef_cosdx_u10rvvm2, ptr @Sleef_cosfx_u10rvvm2, ptr @Sleef_expfx_u10rvvm2, ptr @Sleef_exp10dx_u10rvvm2, ptr @Sleef_exp10fx_u10rvvm2, ptr @Sleef_exp2dx_u10rvvm2, ptr @Sleef_exp2fx_u10rvvm2, ptr @Sleef_fmadx_rvvm2, ptr @Sleef_fmafx_rvvm2, ptr @Sleef_logdx_u10rvvm2, ptr @Sleef_logfx_u10rvvm2, ptr @Sleef_log10dx_u10rvvm2, ptr @Sleef_log10fx_u10rvvm2, ptr @Sleef_log2dx_u10rvvm2, ptr @Sleef_log2fx_u10rvvm2, ptr @Sleef_powdx_u10rvvm2, ptr @Sleef_powfx_u10rvvm2, ptr @Sleef_sindx_u10rvvm2, ptr @Sleef_sinfx_u10rvvm2, ptr @Sleef_sqrtdx_u05rvvm2, ptr @Sleef_sqrtfx_u05rvvm2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [22 x ptr] [ptr @Sleef_cosdx_u10rvvm2, ptr @Sleef_cosfx_u10rvvm2, ptr @Sleef_expdx_u10rvvm2, ptr @Sleef_expfx_u10rvvm2, ptr @Sleef_exp10dx_u10rvvm2, ptr @Sleef_exp10fx_u10rvvm2, ptr @Sleef_exp2dx_u10rvvm2, ptr @Sleef_exp2fx_u10rvvm2, ptr @Sleef_fmadx_rvvm2, ptr @Sleef_fmafx_rvvm2, ptr @Sleef_logdx_u10rvvm2, ptr @Sleef_logfx_u10rvvm2, ptr @Sleef_log10dx_u10rvvm2, ptr @Sleef_log10fx_u10rvvm2, ptr @Sleef_log2dx_u10rvvm2, ptr @Sleef_log2fx_u10rvvm2, ptr @Sleef_powdx_u10rvvm2, ptr @Sleef_powfx_u10rvvm2, ptr @Sleef_sindx_u10rvvm2, ptr @Sleef_sinfx_u10rvvm2, ptr @Sleef_sqrtdx_u05rvvm2, ptr @Sleef_sqrtfx_u05rvvm2], section "llvm.metadata"
;.
define <vscale x 2 x double> @llvm_ceil_vscale_f64(<vscale x 2 x double> %in) {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_ceil_vscale_f64(
@@ -67,7 +67,7 @@ define <vscale x 4 x float> @llvm_cos_vscale_f32(<vscale x 4 x float> %in) {
define <vscale x 2 x double> @llvm_exp_vscale_f64(<vscale x 2 x double> %in) {
; CHECK-LABEL: define <vscale x 2 x double> @llvm_exp_vscale_f64(
; CHECK-SAME: <vscale x 2 x double> [[IN:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = call fast <vscale x 2 x double> @llvm.exp.nxv2f64(<vscale x 2 x double> [[IN]])
+; CHECK-NEXT: [[TMP1:%.*]] = call fast <vscale x 2 x double> @Sleef_expdx_u10rvvm2(<vscale x 2 x double> [[IN]])
; CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
;
%1 = call fast <vscale x 2 x double> @llvm.exp.nxv2f64(<vscale x 2 x double> %in)
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index f8ca417..ab09910 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -3,6 +3,8 @@
; RUN: | FileCheck %s -check-prefixes=CHECK,RV32I
; RUN: llc -mtriple=riscv32 -mattr=+m,+zba -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBA
+; RUN: llc -mtriple=riscv32 -mattr=+m,+xandesperf -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV32XANDESPERF
define signext i16 @sh1add(i64 %0, ptr %1) {
; RV32I-LABEL: sh1add:
@@ -17,6 +19,12 @@ define signext i16 @sh1add(i64 %0, ptr %1) {
; RV32ZBA-NEXT: sh1add a0, a0, a2
; RV32ZBA-NEXT: lh a0, 0(a0)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: sh1add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a2, a0
+; RV32XANDESPERF-NEXT: lh a0, 0(a0)
+; RV32XANDESPERF-NEXT: ret
%3 = getelementptr inbounds i16, ptr %1, i64 %0
%4 = load i16, ptr %3
ret i16 %4
@@ -35,6 +43,12 @@ define i32 @sh2add(i64 %0, ptr %1) {
; RV32ZBA-NEXT: sh2add a0, a0, a2
; RV32ZBA-NEXT: lw a0, 0(a0)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: sh2add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a2, a0
+; RV32XANDESPERF-NEXT: lw a0, 0(a0)
+; RV32XANDESPERF-NEXT: ret
%3 = getelementptr inbounds i32, ptr %1, i64 %0
%4 = load i32, ptr %3
ret i32 %4
@@ -55,6 +69,13 @@ define i64 @sh3add(i64 %0, ptr %1) {
; RV32ZBA-NEXT: lw a0, 0(a1)
; RV32ZBA-NEXT: lw a1, 4(a1)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: sh3add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a1, a2, a0
+; RV32XANDESPERF-NEXT: lw a0, 0(a1)
+; RV32XANDESPERF-NEXT: lw a1, 4(a1)
+; RV32XANDESPERF-NEXT: ret
%3 = getelementptr inbounds i64, ptr %1, i64 %0
%4 = load i64, ptr %3
ret i64 %4
@@ -74,6 +95,12 @@ define i32 @addmul6(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh1add a0, a0, a0
; RV32ZBA-NEXT: sh1add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul6:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 6
%d = add i32 %c, %b
ret i32 %d
@@ -93,6 +120,12 @@ define i32 @addmul10(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: sh1add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul10:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 10
%d = add i32 %c, %b
ret i32 %d
@@ -112,6 +145,12 @@ define i32 @addmul12(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh1add a0, a0, a0
; RV32ZBA-NEXT: sh2add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul12:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 12
%d = add i32 %c, %b
ret i32 %d
@@ -131,6 +170,12 @@ define i32 @addmul18(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: sh1add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul18:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 18
%d = add i32 %c, %b
ret i32 %d
@@ -150,6 +195,12 @@ define i32 @addmul20(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: sh2add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul20:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 20
%d = add i32 %c, %b
ret i32 %d
@@ -169,6 +220,12 @@ define i32 @addmul24(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh1add a0, a0, a0
; RV32ZBA-NEXT: sh3add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul24:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 24
%d = add i32 %c, %b
ret i32 %d
@@ -188,6 +245,12 @@ define i32 @addmul36(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: sh2add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul36:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 36
%d = add i32 %c, %b
ret i32 %d
@@ -207,6 +270,12 @@ define i32 @addmul40(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: sh3add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul40:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 40
%d = add i32 %c, %b
ret i32 %d
@@ -226,6 +295,12 @@ define i32 @addmul72(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: sh3add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addmul72:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 72
%d = add i32 %c, %b
ret i32 %d
@@ -244,6 +319,12 @@ define i32 @mul96(i32 %a) {
; RV32ZBA-NEXT: sh1add a0, a0, a0
; RV32ZBA-NEXT: slli a0, a0, 5
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul96:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV32XANDESPERF-NEXT: slli a0, a0, 5
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 96
ret i32 %c
}
@@ -261,6 +342,12 @@ define i32 @mul160(i32 %a) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: slli a0, a0, 5
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul160:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: slli a0, a0, 5
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 160
ret i32 %c
}
@@ -278,6 +365,12 @@ define i32 @mul288(i32 %a) {
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: slli a0, a0, 5
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul288:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: slli a0, a0, 5
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 288
ret i32 %c
}
@@ -295,6 +388,12 @@ define i32 @mul258(i32 %a) {
; RV32ZBA-NEXT: slli a1, a0, 8
; RV32ZBA-NEXT: sh1add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul258:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: slli a1, a0, 8
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 258
ret i32 %c
}
@@ -312,6 +411,12 @@ define i32 @mul260(i32 %a) {
; RV32ZBA-NEXT: slli a1, a0, 8
; RV32ZBA-NEXT: sh2add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul260:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: slli a1, a0, 8
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 260
ret i32 %c
}
@@ -329,6 +434,12 @@ define i32 @mul264(i32 %a) {
; RV32ZBA-NEXT: slli a1, a0, 8
; RV32ZBA-NEXT: sh3add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul264:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: slli a1, a0, 8
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 264
ret i32 %c
}
@@ -345,6 +456,12 @@ define i32 @mul11(i32 %a) {
; RV32ZBA-NEXT: sh2add a1, a0, a0
; RV32ZBA-NEXT: sh1add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul11:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 11
ret i32 %c
}
@@ -361,6 +478,12 @@ define i32 @mul19(i32 %a) {
; RV32ZBA-NEXT: sh3add a1, a0, a0
; RV32ZBA-NEXT: sh1add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul19:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 19
ret i32 %c
}
@@ -377,6 +500,12 @@ define i32 @mul13(i32 %a) {
; RV32ZBA-NEXT: sh1add a1, a0, a0
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul13:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 13
ret i32 %c
}
@@ -393,6 +522,12 @@ define i32 @mul21(i32 %a) {
; RV32ZBA-NEXT: sh2add a1, a0, a0
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul21:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 21
ret i32 %c
}
@@ -409,6 +544,12 @@ define i32 @mul37(i32 %a) {
; RV32ZBA-NEXT: sh3add a1, a0, a0
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul37:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 37
ret i32 %c
}
@@ -425,6 +566,12 @@ define i32 @mul25(i32 %a) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul25:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 25
ret i32 %c
}
@@ -441,6 +588,12 @@ define i32 @mul41(i32 %a) {
; RV32ZBA-NEXT: sh2add a1, a0, a0
; RV32ZBA-NEXT: sh3add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul41:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 41
ret i32 %c
}
@@ -457,6 +610,12 @@ define i32 @mul73(i32 %a) {
; RV32ZBA-NEXT: sh3add a1, a0, a0
; RV32ZBA-NEXT: sh3add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul73:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 73
ret i32 %c
}
@@ -473,6 +632,12 @@ define i32 @mul27(i32 %a) {
; RV32ZBA-NEXT: sh1add a0, a0, a0
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul27:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 27
ret i32 %c
}
@@ -489,6 +654,12 @@ define i32 @mul45(i32 %a) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul45:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 45
ret i32 %c
}
@@ -505,6 +676,12 @@ define i32 @mul81(i32 %a) {
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: sh3add a0, a0, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul81:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 81
ret i32 %c
}
@@ -522,6 +699,12 @@ define i32 @mul4098(i32 %a) {
; RV32ZBA-NEXT: slli a1, a0, 12
; RV32ZBA-NEXT: sh1add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul4098:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: slli a1, a0, 12
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 4098
ret i32 %c
}
@@ -539,6 +722,12 @@ define i32 @mul4100(i32 %a) {
; RV32ZBA-NEXT: slli a1, a0, 12
; RV32ZBA-NEXT: sh2add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul4100:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: slli a1, a0, 12
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 4100
ret i32 %c
}
@@ -556,6 +745,12 @@ define i32 @mul4104(i32 %a) {
; RV32ZBA-NEXT: slli a1, a0, 12
; RV32ZBA-NEXT: sh3add a0, a0, a1
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul4104:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: slli a1, a0, 12
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, 4104
ret i32 %c
}
@@ -573,6 +768,12 @@ define i32 @add4104(i32 %a) {
; RV32ZBA-NEXT: li a1, 1026
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: add4104:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: li a1, 1026
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = add i32 %a, 4104
ret i32 %c
}
@@ -590,6 +791,12 @@ define i32 @add8208(i32 %a) {
; RV32ZBA-NEXT: li a1, 1026
; RV32ZBA-NEXT: sh3add a0, a1, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: add8208:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: li a1, 1026
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
%c = add i32 %a, 8208
ret i32 %c
}
@@ -617,6 +824,12 @@ define i32 @addshl_5_6(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh1add a0, a1, a0
; RV32ZBA-NEXT: slli a0, a0, 5
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addshl_5_6:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV32XANDESPERF-NEXT: slli a0, a0, 5
+; RV32XANDESPERF-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 6
%e = add i32 %c, %d
@@ -636,6 +849,12 @@ define i32 @addshl_5_7(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: slli a0, a0, 5
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addshl_5_7:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: slli a0, a0, 5
+; RV32XANDESPERF-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 7
%e = add i32 %c, %d
@@ -655,6 +874,12 @@ define i32 @addshl_5_8(i32 %a, i32 %b) {
; RV32ZBA-NEXT: sh3add a0, a1, a0
; RV32ZBA-NEXT: slli a0, a0, 5
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: addshl_5_8:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV32XANDESPERF-NEXT: slli a0, a0, 5
+; RV32XANDESPERF-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 8
%e = add i32 %c, %d
@@ -676,6 +901,13 @@ define i32 @srli_1_sh2add(ptr %0, i32 %1) {
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: lw a0, 0(a0)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: srli_1_sh2add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: srli a1, a1, 1
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: lw a0, 0(a0)
+; RV32XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 1
%4 = getelementptr inbounds i32, ptr %0, i32 %3
%5 = load i32, ptr %4, align 4
@@ -699,6 +931,14 @@ define i64 @srli_2_sh3add(ptr %0, i32 %1) {
; RV32ZBA-NEXT: lw a0, 0(a1)
; RV32ZBA-NEXT: lw a1, 4(a1)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: srli_2_sh3add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: srli a1, a1, 2
+; RV32XANDESPERF-NEXT: nds.lea.d a1, a0, a1
+; RV32XANDESPERF-NEXT: lw a0, 0(a1)
+; RV32XANDESPERF-NEXT: lw a1, 4(a1)
+; RV32XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 2
%4 = getelementptr inbounds i64, ptr %0, i32 %3
%5 = load i64, ptr %4, align 8
@@ -720,6 +960,13 @@ define signext i16 @srli_2_sh1add(ptr %0, i32 %1) {
; RV32ZBA-NEXT: sh1add a0, a1, a0
; RV32ZBA-NEXT: lh a0, 0(a0)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: srli_2_sh1add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: srli a1, a1, 2
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV32XANDESPERF-NEXT: lh a0, 0(a0)
+; RV32XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 2
%4 = getelementptr inbounds i16, ptr %0, i32 %3
%5 = load i16, ptr %4, align 2
@@ -741,6 +988,13 @@ define i32 @srli_3_sh2add(ptr %0, i32 %1) {
; RV32ZBA-NEXT: sh2add a0, a1, a0
; RV32ZBA-NEXT: lw a0, 0(a0)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: srli_3_sh2add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: srli a1, a1, 3
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV32XANDESPERF-NEXT: lw a0, 0(a0)
+; RV32XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 3
%4 = getelementptr inbounds i32, ptr %0, i32 %3
%5 = load i32, ptr %4, align 4
@@ -764,6 +1018,14 @@ define i64 @srli_4_sh3add(ptr %0, i32 %1) {
; RV32ZBA-NEXT: lw a0, 0(a1)
; RV32ZBA-NEXT: lw a1, 4(a1)
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: srli_4_sh3add:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: srli a1, a1, 4
+; RV32XANDESPERF-NEXT: nds.lea.d a1, a0, a1
+; RV32XANDESPERF-NEXT: lw a0, 0(a1)
+; RV32XANDESPERF-NEXT: lw a1, 4(a1)
+; RV32XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 4
%4 = getelementptr inbounds i64, ptr %0, i32 %3
%5 = load i64, ptr %4, align 8
@@ -802,6 +1064,12 @@ define i32 @mul_neg3(i32 %a) {
; RV32ZBA-NEXT: sh1add a0, a0, a0
; RV32ZBA-NEXT: neg a0, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul_neg3:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV32XANDESPERF-NEXT: neg a0, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, -3
ret i32 %c
}
@@ -829,6 +1097,12 @@ define i32 @mul_neg5(i32 %a) {
; RV32ZBA-NEXT: sh2add a0, a0, a0
; RV32ZBA-NEXT: neg a0, a0
; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: mul_neg5:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV32XANDESPERF-NEXT: neg a0, a0
+; RV32XANDESPERF-NEXT: ret
%c = mul i32 %a, -5
ret i32 %c
}
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index e362e5e..a023845 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -7,6 +7,8 @@
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBNOZBS
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb,+zbs -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBZBS
+; RUN: llc -mtriple=riscv64 -mattr=+m,+xandesperf -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=CHECK,RV64XANDESPERF
define i64 @slliuw(i64 %a) nounwind {
; RV64I-LABEL: slliuw:
@@ -19,6 +21,12 @@ define i64 @slliuw(i64 %a) nounwind {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli.uw a0, a0, 1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: slliuw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 31
+; RV64XANDESPERF-NEXT: ret
%conv1 = shl i64 %a, 1
%shl = and i64 %conv1, 8589934590
ret i64 %shl
@@ -41,6 +49,15 @@ define i128 @slliuw_2(i32 signext %0, ptr %1) {
; RV64ZBA-NEXT: ld a0, 0(a1)
; RV64ZBA-NEXT: ld a1, 8(a1)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: slliuw_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 28
+; RV64XANDESPERF-NEXT: add a1, a1, a0
+; RV64XANDESPERF-NEXT: ld a0, 0(a1)
+; RV64XANDESPERF-NEXT: ld a1, 8(a1)
+; RV64XANDESPERF-NEXT: ret
%3 = zext i32 %0 to i64
%4 = getelementptr inbounds i128, ptr %1, i64 %3
%5 = load i128, ptr %4
@@ -59,6 +76,11 @@ define i64 @adduw(i64 %a, i64 %b) nounwind {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: add.uw a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%and = and i64 %b, 4294967295
%add = add i64 %and, %a
ret i64 %add
@@ -78,6 +100,12 @@ define signext i8 @adduw_2(i32 signext %0, ptr %1) {
; RV64ZBA-NEXT: add.uw a0, a0, a1
; RV64ZBA-NEXT: lb a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: lb a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = zext i32 %0 to i64
%4 = getelementptr inbounds i8, ptr %1, i64 %3
%5 = load i8, ptr %4
@@ -95,6 +123,11 @@ define i64 @zextw_i64(i64 %a) nounwind {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zextw_i64:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, zero, a0
+; RV64XANDESPERF-NEXT: ret
%and = and i64 %a, 4294967295
ret i64 %and
}
@@ -114,6 +147,12 @@ define i64 @zextw_demandedbits_i64(i64 %0) {
; RV64ZBA-NEXT: ori a0, a0, 1
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zextw_demandedbits_i64:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: ori a0, a0, 1
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, zero, a0
+; RV64XANDESPERF-NEXT: ret
%2 = and i64 %0, 4294967294
%3 = or i64 %2, 1
ret i64 %3
@@ -132,6 +171,12 @@ define signext i16 @sh1add(i64 %0, ptr %1) {
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh1add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = getelementptr inbounds i16, ptr %1, i64 %0
%4 = load i16, ptr %3
ret i16 %4
@@ -150,6 +195,12 @@ define signext i32 @sh2add(i64 %0, ptr %1) {
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = getelementptr inbounds i32, ptr %1, i64 %0
%4 = load i32, ptr %3
ret i32 %4
@@ -168,6 +219,12 @@ define i64 @sh3add(i64 %0, ptr %1) {
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = getelementptr inbounds i64, ptr %1, i64 %0
%4 = load i64, ptr %3
ret i64 %4
@@ -187,6 +244,12 @@ define signext i16 @sh1adduw(i32 signext %0, ptr %1) {
; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh1adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = zext i32 %0 to i64
%4 = getelementptr inbounds i16, ptr %1, i64 %3
%5 = load i16, ptr %4
@@ -205,6 +268,11 @@ define i64 @sh1adduw_2(i64 %0, i64 %1) {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh1adduw_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 1
%4 = and i64 %3, 8589934590
%5 = add i64 %4, %1
@@ -223,6 +291,11 @@ define i64 @sh1adduw_3(i64 %0, i64 %1) {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh1adduw_3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 1
%4 = and i64 %3, 8589934590
%5 = or disjoint i64 %4, %1
@@ -243,6 +316,12 @@ define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = zext i32 %0 to i64
%4 = getelementptr inbounds i32, ptr %1, i64 %3
%5 = load i32, ptr %4
@@ -261,6 +340,11 @@ define i64 @sh2adduw_2(i64 %0, i64 %1) {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2adduw_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 2
%4 = and i64 %3, 17179869180
%5 = add i64 %4, %1
@@ -279,6 +363,11 @@ define i64 @sh2adduw_3(i64 %0, i64 %1) {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2adduw_3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 2
%4 = and i64 %3, 17179869180
%5 = or disjoint i64 %4, %1
@@ -299,6 +388,12 @@ define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh3adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = zext i32 %0 to i64
%4 = getelementptr inbounds i64, ptr %1, i64 %3
%5 = load i64, ptr %4
@@ -315,8 +410,17 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh3adduw_2:
; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 3
+; RV64ZBA-NEXT: srli a0, a0, 3
; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh3adduw_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 3
+; RV64XANDESPERF-NEXT: srli a0, a0, 3
+; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 3
%4 = and i64 %3, 34359738360
%5 = add i64 %4, %1
@@ -333,8 +437,17 @@ define i64 @sh3adduw_3(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh3adduw_3:
; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a0, a0, 3
+; RV64ZBA-NEXT: srli a0, a0, 3
; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh3adduw_3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 3
+; RV64XANDESPERF-NEXT: srli a0, a0, 3
+; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%3 = shl i64 %0, 3
%4 = and i64 %3, 34359738360
%5 = or disjoint i64 %4, %1
@@ -363,6 +476,14 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
; RV64ZBA-NEXT: sraiw a0, a0, 2
; RV64ZBA-NEXT: mul a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2add_extra_sext:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: sllw a1, a2, a0
+; RV64XANDESPERF-NEXT: sraiw a0, a0, 2
+; RV64XANDESPERF-NEXT: mul a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%a = shl i32 %x, 2
%b = add i32 %a, %y
%c = shl i32 %z, %b
@@ -387,6 +508,12 @@ define i64 @addmul6(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul6:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 6
%d = add i64 %c, %b
ret i64 %d
@@ -406,6 +533,12 @@ define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: disjointormul6:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 6
%d = or disjoint i64 %c, %b
ret i64 %d
@@ -425,6 +558,12 @@ define i64 @addmul10(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul10:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 10
%d = add i64 %c, %b
ret i64 %d
@@ -444,6 +583,12 @@ define i64 @addmul12(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul12:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 12
%d = add i64 %c, %b
ret i64 %d
@@ -463,6 +608,12 @@ define i64 @addmul18(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul18:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 18
%d = add i64 %c, %b
ret i64 %d
@@ -482,6 +633,12 @@ define i64 @addmul20(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul20:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 20
%d = add i64 %c, %b
ret i64 %d
@@ -513,6 +670,12 @@ define i64 @addmul24(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul24:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 24
%d = add i64 %c, %b
ret i64 %d
@@ -532,6 +695,12 @@ define i64 @addmul36(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul36:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 36
%d = add i64 %c, %b
ret i64 %d
@@ -551,6 +720,12 @@ define i64 @addmul40(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul40:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 40
%d = add i64 %c, %b
ret i64 %d
@@ -570,6 +745,12 @@ define i64 @addmul72(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul72:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 72
%d = add i64 %c, %b
ret i64 %d
@@ -588,6 +769,13 @@ define i64 @mul50(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul50:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 50
ret i64 %c
}
@@ -606,6 +794,13 @@ define i64 @addmul50(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul50:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 50
%d = add i64 %c, %b
ret i64 %d
@@ -624,6 +819,13 @@ define i64 @mul100(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 2
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul100:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 2
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 100
ret i64 %c
}
@@ -642,6 +844,13 @@ define i64 @addmul100(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul100:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 100
%d = add i64 %c, %b
ret i64 %d
@@ -660,6 +869,13 @@ define i64 @mul162(i64 %a) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul162:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 162
ret i64 %c
}
@@ -678,6 +894,13 @@ define i64 @addmul162(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul162:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 162
%d = add i64 %c, %b
ret i64 %d
@@ -696,6 +919,13 @@ define i64 @mul180(i64 %a) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 2
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul180:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 2
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 180
ret i64 %c
}
@@ -714,6 +944,13 @@ define i64 @addmul180(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul180:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 180
%d = add i64 %c, %b
ret i64 %d
@@ -734,6 +971,14 @@ define i64 @add255mul180(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 2
; RV64ZBA-NEXT: addi a0, a0, 255
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: add255mul180:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 2
+; RV64XANDESPERF-NEXT: addi a0, a0, 255
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 180
%d = add i64 %c, 255
ret i64 %d
@@ -752,6 +997,13 @@ define i64 @mul200(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 3
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul200:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 3
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 200
ret i64 %c
}
@@ -770,6 +1022,13 @@ define i64 @addmul200(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addmul200:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 200
%d = add i64 %c, %b
ret i64 %d
@@ -812,6 +1071,12 @@ define i64 @mul96(i64 %a) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul96:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 96
ret i64 %c
}
@@ -829,6 +1094,13 @@ define i64 @mul119(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 7
; RV64ZBA-NEXT: sub a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul119:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 7
+; RV64XANDESPERF-NEXT: sub a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 119
ret i64 %c
}
@@ -846,6 +1118,13 @@ define i64 @mul123(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 7
; RV64ZBA-NEXT: sub a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul123:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 7
+; RV64XANDESPERF-NEXT: sub a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 123
ret i64 %c
}
@@ -863,6 +1142,13 @@ define i64 @mul125(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 7
; RV64ZBA-NEXT: sub a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul125:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a1, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 7
+; RV64XANDESPERF-NEXT: sub a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 125
ret i64 %c
}
@@ -880,6 +1166,13 @@ define i64 @mul131(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 7
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul131:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a1, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 7
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 131
ret i64 %c
}
@@ -897,6 +1190,13 @@ define i64 @mul133(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 7
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul133:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 7
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 133
ret i64 %c
}
@@ -914,6 +1214,13 @@ define i64 @mul137(i64 %a) {
; RV64ZBA-NEXT: slli a0, a0, 7
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul137:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 7
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 137
ret i64 %c
}
@@ -931,6 +1238,12 @@ define i64 @mul160(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul160:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 160
ret i64 %c
}
@@ -948,6 +1261,12 @@ define i64 @mul288(i64 %a) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul288:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 288
ret i64 %c
}
@@ -966,6 +1285,13 @@ define i64 @zext_mul68(i32 signext %a) {
; RV64ZBA-NEXT: slli.uw a1, a0, 6
; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul68:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 32
+; RV64XANDESPERF-NEXT: srli a1, a1, 26
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 68
ret i64 %c
@@ -985,6 +1311,13 @@ define i64 @zext_mul96(i32 signext %a) {
; RV64ZBA-NEXT: slli.uw a0, a0, 5
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul96:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 27
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 96
ret i64 %c
@@ -1004,6 +1337,13 @@ define i64 @zext_mul160(i32 signext %a) {
; RV64ZBA-NEXT: slli.uw a0, a0, 5
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul160:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 27
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 160
ret i64 %c
@@ -1023,6 +1363,13 @@ define i64 @zext_mul288(i32 signext %a) {
; RV64ZBA-NEXT: slli.uw a0, a0, 5
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul288:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 27
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 288
ret i64 %c
@@ -1042,6 +1389,12 @@ define i64 @zext_mul12884901888(i32 signext %a) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul12884901888:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 12884901888
ret i64 %c
@@ -1061,6 +1414,12 @@ define i64 @zext_mul21474836480(i32 signext %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul21474836480:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 21474836480
ret i64 %c
@@ -1080,6 +1439,12 @@ define i64 @zext_mul38654705664(i32 signext %a) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: zext_mul38654705664:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 38654705664
ret i64 %c
@@ -1131,6 +1496,13 @@ define i64 @sh1adduw_imm(i32 signext %0) {
; RV64ZBA-NEXT: slli.uw a0, a0, 1
; RV64ZBA-NEXT: addi a0, a0, 11
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh1adduw_imm:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 31
+; RV64XANDESPERF-NEXT: addi a0, a0, 11
+; RV64XANDESPERF-NEXT: ret
%a = zext i32 %0 to i64
%b = shl i64 %a, 1
%c = add i64 %b, 11
@@ -1150,6 +1522,13 @@ define i64 @sh2adduw_imm(i32 signext %0) {
; RV64ZBA-NEXT: slli.uw a0, a0, 2
; RV64ZBA-NEXT: addi a0, a0, -12
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2adduw_imm:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 30
+; RV64XANDESPERF-NEXT: addi a0, a0, -12
+; RV64XANDESPERF-NEXT: ret
%a = zext i32 %0 to i64
%b = shl i64 %a, 2
%c = add i64 %b, -12
@@ -1169,6 +1548,13 @@ define i64 @sh3adduw_imm(i32 signext %0) {
; RV64ZBA-NEXT: slli.uw a0, a0, 3
; RV64ZBA-NEXT: addi a0, a0, 13
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh3adduw_imm:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 29
+; RV64XANDESPERF-NEXT: addi a0, a0, 13
+; RV64XANDESPERF-NEXT: ret
%a = zext i32 %0 to i64
%b = shl i64 %a, 3
%c = add i64 %b, 13
@@ -1188,6 +1574,12 @@ define i64 @adduw_imm(i32 signext %0) nounwind {
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: addi a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw_imm:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, zero, a0
+; RV64XANDESPERF-NEXT: addi a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%a = zext i32 %0 to i64
%b = add i64 %a, 5
ret i64 %b
@@ -1206,6 +1598,12 @@ define i64 @mul258(i64 %a) {
; RV64ZBA-NEXT: slli a1, a0, 8
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul258:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 8
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 258
ret i64 %c
}
@@ -1223,6 +1621,12 @@ define i64 @mul260(i64 %a) {
; RV64ZBA-NEXT: slli a1, a0, 8
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul260:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 8
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 260
ret i64 %c
}
@@ -1240,6 +1644,12 @@ define i64 @mul264(i64 %a) {
; RV64ZBA-NEXT: slli a1, a0, 8
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul264:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 8
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 264
ret i64 %c
}
@@ -1257,6 +1667,13 @@ define i64 @imm_zextw() nounwind {
; RV64ZBA-NEXT: li a0, -2
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: imm_zextw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a0, 1
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: addi a0, a0, -2
+; RV64XANDESPERF-NEXT: ret
ret i64 4294967294 ; -2 in 32 bits.
}
@@ -1272,6 +1689,12 @@ define i64 @mul11(i64 %a) {
; RV64ZBA-NEXT: sh2add a1, a0, a0
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul11:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 11
ret i64 %c
}
@@ -1288,6 +1711,12 @@ define i64 @mul19(i64 %a) {
; RV64ZBA-NEXT: sh3add a1, a0, a0
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul19:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 19
ret i64 %c
}
@@ -1304,6 +1733,12 @@ define i64 @mul13(i64 %a) {
; RV64ZBA-NEXT: sh1add a1, a0, a0
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul13:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 13
ret i64 %c
}
@@ -1320,6 +1755,12 @@ define i64 @mul21(i64 %a) {
; RV64ZBA-NEXT: sh2add a1, a0, a0
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul21:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 21
ret i64 %c
}
@@ -1336,6 +1777,12 @@ define i64 @mul37(i64 %a) {
; RV64ZBA-NEXT: sh3add a1, a0, a0
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul37:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 37
ret i64 %c
}
@@ -1352,6 +1799,12 @@ define i64 @mul25(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul25:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 25
ret i64 %c
}
@@ -1368,6 +1821,12 @@ define i64 @mul41(i64 %a) {
; RV64ZBA-NEXT: sh2add a1, a0, a0
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul41:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 41
ret i64 %c
}
@@ -1384,6 +1843,12 @@ define i64 @mul73(i64 %a) {
; RV64ZBA-NEXT: sh3add a1, a0, a0
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul73:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 73
ret i64 %c
}
@@ -1400,6 +1865,12 @@ define i64 @mul27(i64 %a) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul27:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 27
ret i64 %c
}
@@ -1416,6 +1887,12 @@ define i64 @mul45(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul45:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 45
ret i64 %c
}
@@ -1432,6 +1909,12 @@ define i64 @mul81(i64 %a) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul81:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 81
ret i64 %c
}
@@ -1449,6 +1932,12 @@ define i64 @mul4098(i64 %a) {
; RV64ZBA-NEXT: slli a1, a0, 12
; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul4098:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 12
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 4098
ret i64 %c
}
@@ -1466,6 +1955,12 @@ define i64 @mul4100(i64 %a) {
; RV64ZBA-NEXT: slli a1, a0, 12
; RV64ZBA-NEXT: sh2add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul4100:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 12
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 4100
ret i64 %c
}
@@ -1483,6 +1978,12 @@ define i64 @mul4104(i64 %a) {
; RV64ZBA-NEXT: slli a1, a0, 12
; RV64ZBA-NEXT: sh3add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul4104:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a0, 12
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, 4104
ret i64 %c
}
@@ -1500,6 +2001,12 @@ define signext i32 @mulw192(i32 signext %a) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: slliw a0, a0, 6
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mulw192:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: slliw a0, a0, 6
+; RV64XANDESPERF-NEXT: ret
%c = mul i32 %a, 192
ret i32 %c
}
@@ -1517,6 +2024,12 @@ define signext i32 @mulw320(i32 signext %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: slliw a0, a0, 6
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mulw320:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: slliw a0, a0, 6
+; RV64XANDESPERF-NEXT: ret
%c = mul i32 %a, 320
ret i32 %c
}
@@ -1534,6 +2047,12 @@ define signext i32 @mulw576(i32 signext %a) {
; RV64ZBA-NEXT: sh3add a0, a0, a0
; RV64ZBA-NEXT: slliw a0, a0, 6
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mulw576:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: slliw a0, a0, 6
+; RV64XANDESPERF-NEXT: ret
%c = mul i32 %a, 576
ret i32 %c
}
@@ -1551,6 +2070,12 @@ define i64 @add4104(i64 %a) {
; RV64ZBA-NEXT: li a1, 1026
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: add4104:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a1, 1026
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = add i64 %a, 4104
ret i64 %c
}
@@ -1568,6 +2093,12 @@ define i64 @add4104_2(i64 %a) {
; RV64ZBA-NEXT: li a1, 1026
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: add4104_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a1, 1026
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = or disjoint i64 %a, 4104
ret i64 %c
}
@@ -1585,6 +2116,12 @@ define i64 @add8208(i64 %a) {
; RV64ZBA-NEXT: li a1, 1026
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: add8208:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a1, 1026
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%c = add i64 %a, 8208
ret i64 %c
}
@@ -1624,6 +2161,12 @@ define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: slliw a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addshl32_5_6:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: slliw a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 6
%e = add i32 %c, %d
@@ -1643,6 +2186,12 @@ define i64 @addshl64_5_6(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: slli a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addshl64_5_6:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: slli a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = shl i64 %a, 5
%d = shl i64 %b, 6
%e = add i64 %c, %d
@@ -1662,6 +2211,12 @@ define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: slliw a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addshl32_5_7:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: slliw a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 7
%e = add i32 %c, %d
@@ -1681,6 +2236,12 @@ define i64 @addshl64_5_7(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: slli a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addshl64_5_7:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: slli a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = shl i64 %a, 5
%d = shl i64 %b, 7
%e = add i64 %c, %d
@@ -1700,6 +2261,12 @@ define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: slliw a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addshl32_5_8:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: slliw a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 8
%e = add i32 %c, %d
@@ -1719,6 +2286,12 @@ define i64 @addshl64_5_8(i64 %a, i64 %b) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: slli a0, a0, 5
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: addshl64_5_8:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: slli a0, a0, 5
+; RV64XANDESPERF-NEXT: ret
%c = shl i64 %a, 5
%d = shl i64 %b, 8
%e = add i64 %c, %d
@@ -1747,6 +2320,13 @@ define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
; RV64ZBAZBB-NEXT: slli a0, a0, 23
; RV64ZBAZBB-NEXT: srli a0, a0, 32
; RV64ZBAZBB-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sext_ashr_zext_i8:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 56
+; RV64XANDESPERF-NEXT: srai a0, a0, 31
+; RV64XANDESPERF-NEXT: srli a0, a0, 32
+; RV64XANDESPERF-NEXT: ret
%ext = sext i8 %a to i32
%1 = ashr i32 %ext, 9
ret i32 %1
@@ -1766,6 +2346,12 @@ define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh6_sh3_add1:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a2, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
@@ -1788,6 +2374,13 @@ define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh6_sh3_add2:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: slli a1, a1, 6
+; RV64XANDESPERF-NEXT: add a0, a1, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
@@ -1810,6 +2403,12 @@ define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh6_sh3_add3:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: nds.lea.d a1, a2, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
@@ -1833,6 +2432,13 @@ define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh6_sh3_add4:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: slli a1, a1, 6
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
@@ -1863,6 +2469,13 @@ define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
; RV64ZBAZBB-NEXT: slli a0, a0, 23
; RV64ZBAZBB-NEXT: srli a0, a0, 32
; RV64ZBAZBB-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sext_ashr_zext_i16:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a0, a0, 48
+; RV64XANDESPERF-NEXT: srai a0, a0, 25
+; RV64XANDESPERF-NEXT: srli a0, a0, 32
+; RV64XANDESPERF-NEXT: ret
%ext = sext i16 %a to i32
%1 = ashr i32 %ext, 9
ret i32 %1
@@ -1887,6 +2500,13 @@ define signext i16 @sh1adduw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh1adduw_ptrdiff:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a0, a0, 1
+; RV64XANDESPERF-NEXT: nds.lea.h.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%ptrdiff = lshr exact i64 %diff, 1
%cast = and i64 %ptrdiff, 4294967295
%ptr = getelementptr inbounds i16, ptr %baseptr, i64 %cast
@@ -1911,6 +2531,13 @@ define signext i32 @sh2adduw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh2adduw_ptrdiff:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a0, a0, 2
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%ptrdiff = lshr exact i64 %diff, 2
%cast = and i64 %ptrdiff, 4294967295
%ptr = getelementptr inbounds i32, ptr %baseptr, i64 %cast
@@ -1935,6 +2562,13 @@ define i64 @sh3adduw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: sh3adduw_ptrdiff:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a0, a0, 3
+; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%ptrdiff = lshr exact i64 %diff, 3
%cast = and i64 %ptrdiff, 4294967295
%ptr = getelementptr inbounds i64, ptr %baseptr, i64 %cast
@@ -1957,6 +2591,13 @@ define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_1_sh1add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 1
+; RV64XANDESPERF-NEXT: nds.lea.h.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 1
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i16, ptr %0, i64 %4
@@ -1984,6 +2625,17 @@ define i128 @slliuw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64ZBA-NEXT: ld a0, 0(a1)
; RV64ZBA-NEXT: ld a1, 8(a1)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: slliuw_ptrdiff:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a2, 1
+; RV64XANDESPERF-NEXT: slli a2, a2, 36
+; RV64XANDESPERF-NEXT: addi a2, a2, -16
+; RV64XANDESPERF-NEXT: and a0, a0, a2
+; RV64XANDESPERF-NEXT: add a1, a1, a0
+; RV64XANDESPERF-NEXT: ld a0, 0(a1)
+; RV64XANDESPERF-NEXT: ld a1, 8(a1)
+; RV64XANDESPERF-NEXT: ret
%ptrdiff = lshr exact i64 %diff, 4
%cast = and i64 %ptrdiff, 4294967295
%ptr = getelementptr inbounds i128, ptr %baseptr, i64 %cast
@@ -2006,6 +2658,13 @@ define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_2_sh2add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 2
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2025,9 +2684,16 @@ define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
; RV64ZBA-LABEL: srliw_3_sh3add:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: srliw a1, a1, 3
-; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: sh3add.uw a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_3_sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 3
+; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 3
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2050,6 +2716,13 @@ define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_1_sh2add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 1
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2072,6 +2745,13 @@ define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_1_sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 1
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2094,6 +2774,13 @@ define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_2_sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 2
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2116,6 +2803,13 @@ define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_2_sh1add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 2
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i16, ptr %0, i64 %4
@@ -2139,6 +2833,13 @@ define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_3_sh2add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 3
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 3
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2161,6 +2862,13 @@ define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srliw_4_sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a1, a1, 4
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i32 %1, 4
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2183,6 +2891,13 @@ define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_1_sh2add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a1, a1, 1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i64 %1, 1
%4 = getelementptr inbounds i32, ptr %0, i64 %3
%5 = load i32, ptr %4, align 4
@@ -2204,6 +2919,13 @@ define i64 @srli_2_sh3add(ptr %0, i64 %1) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_2_sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a1, a1, 2
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i64 %1, 2
%4 = getelementptr inbounds i64, ptr %0, i64 %3
%5 = load i64, ptr %4, align 8
@@ -2225,6 +2947,13 @@ define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_2_sh1add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a1, a1, 2
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i64 %1, 2
%4 = getelementptr inbounds i16, ptr %0, i64 %3
%5 = load i16, ptr %4, align 2
@@ -2246,6 +2975,13 @@ define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_3_sh2add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a1, a1, 3
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i64 %1, 3
%4 = getelementptr inbounds i32, ptr %0, i64 %3
%5 = load i32, ptr %4, align 4
@@ -2267,6 +3003,13 @@ define i64 @srli_4_sh3add(ptr %0, i64 %1) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_4_sh3add:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a1, a1, 4
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = lshr i64 %1, 4
%4 = getelementptr inbounds i64, ptr %0, i64 %3
%5 = load i64, ptr %4, align 8
@@ -2288,6 +3031,13 @@ define signext i16 @shl_2_sh1adduw(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh1add.uw a0, a1, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: shl_2_sh1adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 2
+; RV64XANDESPERF-NEXT: nds.lea.h.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = shl i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i16, ptr %0, i64 %4
@@ -2310,6 +3060,13 @@ define signext i32 @shl_16_sh2adduw(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh2add.uw a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: shl_16_sh2adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 16
+; RV64XANDESPERF-NEXT: nds.lea.w.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = shl i32 %1, 16
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
@@ -2332,6 +3089,13 @@ define i64 @shl_31_sh3adduw(ptr %0, i32 signext %1) {
; RV64ZBA-NEXT: sh3add.uw a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: shl_31_sh3adduw:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 31
+; RV64XANDESPERF-NEXT: nds.lea.d.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%3 = shl i32 %1, 31
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
@@ -2353,6 +3117,12 @@ define i64 @pack_i64(i64 %a, i64 %b) nounwind {
; RV64ZBA-NEXT: slli a1, a1, 32
; RV64ZBA-NEXT: add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: pack_i64:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 32
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%shl = and i64 %a, 4294967295
%shl1 = shl i64 %b, 32
%or = or i64 %shl1, %shl
@@ -2373,6 +3143,12 @@ define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
; RV64ZBA-NEXT: slli a1, a1, 32
; RV64ZBA-NEXT: add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: pack_i64_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 32
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%zexta = zext i32 %a to i64
%zextb = zext i32 %b to i64
%shl1 = shl i64 %zextb, 32
@@ -2392,6 +3168,11 @@ define i64 @pack_i64_disjoint(i64 %a, i64 %b) nounwind {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: pack_i64_disjoint:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%shl = and i64 %a, 4294967295
%or = or disjoint i64 %b, %shl
ret i64 %or
@@ -2409,6 +3190,11 @@ define i64 @pack_i64_disjoint_2(i32 signext %a, i64 %b) nounwind {
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: pack_i64_disjoint_2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
%zexta = zext i32 %a to i64
%or = or disjoint i64 %b, %zexta
ret i64 %or
@@ -2429,6 +3215,13 @@ define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: add a0, a0, a2
; RV64ZBA-NEXT: lbu a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh1_sh0:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1
+; RV64XANDESPERF-NEXT: add a0, a0, a2
+; RV64XANDESPERF-NEXT: lbu a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [2 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
@@ -2450,6 +3243,13 @@ define i16 @array_index_sh1_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh1add a0, a2, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh1_sh1:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a2
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [2 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
@@ -2471,6 +3271,13 @@ define i32 @array_index_sh1_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh2add a0, a2, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh1_sh2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a2
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [2 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
@@ -2492,6 +3299,14 @@ define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh1_sh3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 4
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
@@ -2512,6 +3327,13 @@ define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: add a0, a0, a2
; RV64ZBA-NEXT: lbu a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh2_sh0:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a1
+; RV64XANDESPERF-NEXT: add a0, a0, a2
+; RV64XANDESPERF-NEXT: lbu a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [4 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
@@ -2533,6 +3355,13 @@ define i16 @array_index_sh2_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh1add a0, a2, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh2_sh1:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a2
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [4 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
@@ -2554,6 +3383,14 @@ define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh2_sh2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 4
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a2
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
@@ -2575,6 +3412,14 @@ define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh2_sh3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 5
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
@@ -2595,6 +3440,13 @@ define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: add a0, a0, a2
; RV64ZBA-NEXT: lbu a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh3_sh0:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: add a0, a0, a2
+; RV64XANDESPERF-NEXT: lbu a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [8 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
@@ -2616,6 +3468,14 @@ define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh3_sh1:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 4
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a2
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
@@ -2637,6 +3497,14 @@ define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh3_sh2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 5
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a2
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
@@ -2658,6 +3526,14 @@ define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh3_sh3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 6
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
@@ -2683,6 +3559,15 @@ define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_lshr_sh3_sh3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srli a1, a1, 58
+; RV64XANDESPERF-NEXT: slli a1, a1, 6
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%shr = lshr i64 %idx1, 58
%a = getelementptr inbounds [8 x i64], ptr %p, i64 %shr, i64 %idx2
%b = load i64, ptr %a, align 8
@@ -2719,6 +3604,14 @@ define i16 @array_index_sh4_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh1add a0, a2, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh4_sh1:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 5
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a2
+; RV64XANDESPERF-NEXT: lh a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [16 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
@@ -2741,6 +3634,14 @@ define i32 @array_index_sh4_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh2add a0, a2, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh4_sh2:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 6
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a2
+; RV64XANDESPERF-NEXT: lw a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [16 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
@@ -2763,6 +3664,14 @@ define i64 @array_index_sh4_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: array_index_sh4_sh3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: slli a1, a1, 7
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a2
+; RV64XANDESPERF-NEXT: ld a0, 0(a0)
+; RV64XANDESPERF-NEXT: ret
%a = getelementptr inbounds [16 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
@@ -2784,6 +3693,14 @@ define ptr @test_gep_gep_dont_crash(ptr %p, i64 %a1, i64 %a2) {
; RV64ZBA-NEXT: add a1, a2, a1
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: test_gep_gep_dont_crash:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: srliw a2, a2, 6
+; RV64XANDESPERF-NEXT: slli a2, a2, 3
+; RV64XANDESPERF-NEXT: add a0, a0, a2
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
%lshr = lshr i64 %a2, 6
%and = and i64 %lshr, 67108863
%gep1 = getelementptr i64, ptr %p, i64 %and
@@ -2807,6 +3724,14 @@ define i64 @regression(i32 signext %x, i32 signext %y) {
; RV64ZBA-NEXT: slli.uw a0, a0, 3
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: regression:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: subw a0, a0, a1
+; RV64XANDESPERF-NEXT: slli a0, a0, 32
+; RV64XANDESPERF-NEXT: srli a0, a0, 29
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
%sub = sub i32 %x, %y
%ext = zext i32 %sub to i64
%res = mul nuw nsw i64 %ext, 24
@@ -2845,6 +3770,12 @@ define i64 @mul_neg3(i64 %a) {
; RV64ZBA-NEXT: sh1add a0, a0, a0
; RV64ZBA-NEXT: neg a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul_neg3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: neg a0, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, -3
ret i64 %c
}
@@ -2872,6 +3803,12 @@ define i64 @mul_neg5(i64 %a) {
; RV64ZBA-NEXT: sh2add a0, a0, a0
; RV64ZBA-NEXT: neg a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: mul_neg5:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: neg a0, a0
+; RV64XANDESPERF-NEXT: ret
%c = mul i64 %a, -5
ret i64 %c
}
@@ -2938,6 +3875,14 @@ define i64 @bext_mul12(i32 %1, i32 %2) {
; RV64ZBAZBBZBS-NEXT: sh1add a0, a0, a0
; RV64ZBAZBBZBS-NEXT: slli a0, a0, 2
; RV64ZBAZBBZBS-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: bext_mul12:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: srlw a0, a0, a1
+; RV64XANDESPERF-NEXT: andi a0, a0, 1
+; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a0
+; RV64XANDESPERF-NEXT: slli a0, a0, 2
+; RV64XANDESPERF-NEXT: ret
entry:
%3 = lshr i32 %1, %2
%4 = and i32 %3, 1
@@ -2977,6 +3922,14 @@ define i64 @bext_mul45(i32 %1, i32 %2) {
; RV64ZBAZBBZBS-NEXT: sh2add a0, a0, a0
; RV64ZBAZBBZBS-NEXT: sh3add a0, a0, a0
; RV64ZBAZBBZBS-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: bext_mul45:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: srlw a0, a0, a1
+; RV64XANDESPERF-NEXT: andi a0, a0, 1
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a0
+; RV64XANDESPERF-NEXT: ret
entry:
%3 = lshr i32 %1, %2
%4 = and i32 %3, 1
@@ -3017,6 +3970,14 @@ define i64 @bext_mul132(i32 %1, i32 %2) {
; RV64ZBAZBBZBS-NEXT: slli a1, a0, 7
; RV64ZBAZBBZBS-NEXT: sh2add a0, a0, a1
; RV64ZBAZBBZBS-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: bext_mul132:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: srlw a0, a0, a1
+; RV64XANDESPERF-NEXT: andi a0, a0, 1
+; RV64XANDESPERF-NEXT: slli a1, a0, 7
+; RV64XANDESPERF-NEXT: nds.lea.w a0, a1, a0
+; RV64XANDESPERF-NEXT: ret
entry:
%3 = lshr i32 %1, %2
%4 = and i32 %3, 1
@@ -3043,6 +4004,17 @@ define ptr @gep_lshr_i32(ptr %0, i64 %1) {
; RV64ZBA-NEXT: sh2add a1, a1, a1
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: gep_lshr_i32:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: slli a1, a1, 2
+; RV64XANDESPERF-NEXT: li a2, 1
+; RV64XANDESPERF-NEXT: slli a2, a2, 36
+; RV64XANDESPERF-NEXT: addi a2, a2, -16
+; RV64XANDESPERF-NEXT: and a1, a1, a2
+; RV64XANDESPERF-NEXT: nds.lea.w a1, a1, a1
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%2 = lshr exact i64 %1, 2
%3 = and i64 %2, 4294967295
@@ -3065,6 +4037,15 @@ define i64 @srli_slliuw(i64 %1) {
; RV64ZBA-NEXT: srli a0, a0, 2
; RV64ZBA-NEXT: slli.uw a0, a0, 4
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_slliuw:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: slli a0, a0, 2
+; RV64XANDESPERF-NEXT: li a1, 1
+; RV64XANDESPERF-NEXT: slli a1, a1, 36
+; RV64XANDESPERF-NEXT: addi a1, a1, -16
+; RV64XANDESPERF-NEXT: and a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%2 = lshr exact i64 %1, 2
%3 = and i64 %2, 4294967295
@@ -3087,6 +4068,15 @@ define i64 @srli_slliuw_canonical(i64 %0) {
; RV64ZBA-NEXT: srli a0, a0, 2
; RV64ZBA-NEXT: slli.uw a0, a0, 4
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_slliuw_canonical:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: slli a0, a0, 2
+; RV64XANDESPERF-NEXT: li a1, 1
+; RV64XANDESPERF-NEXT: slli a1, a1, 36
+; RV64XANDESPERF-NEXT: addi a1, a1, -16
+; RV64XANDESPERF-NEXT: and a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%1 = shl i64 %0, 2
%2 = and i64 %1, 68719476720
@@ -3136,6 +4126,15 @@ define i64 @srli_slliuw_2(i64 %1) {
; RV64ZBA-NEXT: srli a0, a0, 18
; RV64ZBA-NEXT: slli.uw a0, a0, 3
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_slliuw_2:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: srli a0, a0, 15
+; RV64XANDESPERF-NEXT: li a1, 1
+; RV64XANDESPERF-NEXT: slli a1, a1, 35
+; RV64XANDESPERF-NEXT: addi a1, a1, -8
+; RV64XANDESPERF-NEXT: and a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%2 = lshr i64 %1, 18
%3 = and i64 %2, 4294967295
@@ -3158,6 +4157,15 @@ define i64 @srli_slliuw_canonical_2(i64 %0) {
; RV64ZBA-NEXT: srli a0, a0, 18
; RV64ZBA-NEXT: slli.uw a0, a0, 3
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srli_slliuw_canonical_2:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: srli a0, a0, 15
+; RV64XANDESPERF-NEXT: li a1, 1
+; RV64XANDESPERF-NEXT: slli a1, a1, 35
+; RV64XANDESPERF-NEXT: addi a1, a1, -8
+; RV64XANDESPERF-NEXT: and a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%1 = lshr i64 %0, 15
%2 = and i64 %1, 34359738360
@@ -3179,6 +4187,13 @@ define ptr @srai_srli_sh3add(ptr %0, i64 %1) nounwind {
; RV64ZBA-NEXT: srli a1, a1, 6
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: srai_srli_sh3add:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: srai a1, a1, 32
+; RV64XANDESPERF-NEXT: srli a1, a1, 6
+; RV64XANDESPERF-NEXT: nds.lea.d a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%2 = ashr i64 %1, 32
%3 = lshr i64 %2, 6
@@ -3246,6 +4261,16 @@ define i64 @add_u32simm32_zextw(i64 %x) nounwind {
; RV64ZBA-NEXT: addi a0, a0, -2
; RV64ZBA-NEXT: zext.w a0, a0
; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: add_u32simm32_zextw:
+; RV64XANDESPERF: # %bb.0: # %entry
+; RV64XANDESPERF-NEXT: li a1, 1
+; RV64XANDESPERF-NEXT: slli a1, a1, 32
+; RV64XANDESPERF-NEXT: addi a1, a1, -2
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: addi a1, a1, 1
+; RV64XANDESPERF-NEXT: and a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
entry:
%add = add i64 %x, 4294967294
%and = and i64 %add, 4294967295
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll
index e5546ad..ff61ef8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll
@@ -314,11 +314,10 @@ define i32 @vqdot_vv_accum(<16 x i8> %a, <16 x i8> %b, <16 x i32> %x) {
; DOT-LABEL: vqdot_vv_accum:
; DOT: # %bb.0: # %entry
; DOT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DOT-NEXT: vmv.v.i v10, 0
-; DOT-NEXT: vqdot.vv v10, v8, v9
-; DOT-NEXT: vadd.vv v8, v10, v12
+; DOT-NEXT: vmv1r.v v16, v12
+; DOT-NEXT: vqdot.vv v16, v8, v9
; DOT-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; DOT-NEXT: vmv.v.v v12, v8
+; DOT-NEXT: vmv.v.v v12, v16
; DOT-NEXT: vmv.s.x v8, zero
; DOT-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; DOT-NEXT: vredsum.vs v8, v12, v8
@@ -349,11 +348,10 @@ define i32 @vqdotu_vv_accum(<16 x i8> %a, <16 x i8> %b, <16 x i32> %x) {
; DOT-LABEL: vqdotu_vv_accum:
; DOT: # %bb.0: # %entry
; DOT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DOT-NEXT: vmv.v.i v10, 0
-; DOT-NEXT: vqdotu.vv v10, v8, v9
-; DOT-NEXT: vadd.vv v8, v10, v12
+; DOT-NEXT: vmv1r.v v16, v12
+; DOT-NEXT: vqdotu.vv v16, v8, v9
; DOT-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; DOT-NEXT: vmv.v.v v12, v8
+; DOT-NEXT: vmv.v.v v12, v16
; DOT-NEXT: vmv.s.x v8, zero
; DOT-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; DOT-NEXT: vredsum.vs v8, v12, v8
@@ -384,11 +382,10 @@ define i32 @vqdotsu_vv_accum(<16 x i8> %a, <16 x i8> %b, <16 x i32> %x) {
; DOT-LABEL: vqdotsu_vv_accum:
; DOT: # %bb.0: # %entry
; DOT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DOT-NEXT: vmv.v.i v10, 0
-; DOT-NEXT: vqdotsu.vv v10, v8, v9
-; DOT-NEXT: vadd.vv v8, v10, v12
+; DOT-NEXT: vmv1r.v v16, v12
+; DOT-NEXT: vqdotsu.vv v16, v8, v9
; DOT-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; DOT-NEXT: vmv.v.v v12, v8
+; DOT-NEXT: vmv.v.v v12, v16
; DOT-NEXT: vmv.s.x v8, zero
; DOT-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; DOT-NEXT: vredsum.vs v8, v12, v8
@@ -516,12 +513,10 @@ define i32 @vqdot_vv_split(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %
; DOT: # %bb.0: # %entry
; DOT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; DOT-NEXT: vmv.v.i v12, 0
-; DOT-NEXT: vmv.v.i v13, 0
; DOT-NEXT: vqdot.vv v12, v8, v9
-; DOT-NEXT: vqdot.vv v13, v10, v11
-; DOT-NEXT: vadd.vv v8, v12, v13
-; DOT-NEXT: vmv.s.x v9, zero
-; DOT-NEXT: vredsum.vs v8, v8, v9
+; DOT-NEXT: vqdot.vv v12, v10, v11
+; DOT-NEXT: vmv.s.x v8, zero
+; DOT-NEXT: vredsum.vs v8, v12, v8
; DOT-NEXT: vmv.x.s a0, v8
; DOT-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
index a2acb00..f545ecc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
@@ -105,3 +105,33 @@ body: |
%3:vr = COPY %0
...
---
+name: diff_regclass
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: diff_regclass
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 $noreg, 0, 0, 5 /* e32 */, 1 /* ta, mu */
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vmv0 = COPY $v8
+ ; CHECK-NEXT: [[PseudoVADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVADD_VV_M1_MASK [[PseudoVMV_V_I_MF2_]], $noreg, $noreg, [[COPY]], 0, 5 /* e32 */, 0 /* tu, mu */
+ %0:vr = PseudoVMV_V_I_MF2 $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */
+ %1:vrnov0 = PseudoVMV_V_V_MF2 $noreg, %0, 0, 5 /* e32 */, 0 /* tu, mu */
+ %2:vmv0 = COPY $v8
+ %3:vrnov0 = PseudoVADD_VV_M1_MASK %1, $noreg, $noreg, %2, 0, 5 /* e32 */, 0 /* tu, mu */
+...
+---
+name: diff_regclass_passthru
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: diff_regclass_passthru
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 $noreg, 0, 0, 5 /* e32 */, 1 /* ta, mu */
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vmv0 = COPY $v8
+ ; CHECK-NEXT: [[PseudoVLSE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], $noreg, $noreg, [[COPY]], 0, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 4)
+ %2:vr = PseudoVMV_V_I_MF2 $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */
+ %3:vrnov0 = PseudoVMV_V_V_MF2 $noreg, %2, 0, 5 /* e32 */, 0 /* tu, mu */
+ %7:vmv0 = COPY $v8
+ %6:vrnov0 = PseudoVLSE32_V_MF2_MASK %3, $noreg, $noreg, %7, 0, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 4)
diff --git a/llvm/test/CodeGen/X86/apx/reloc-opt.ll b/llvm/test/CodeGen/X86/apx/reloc-opt.ll
new file mode 100644
index 0000000..a5ab94b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/reloc-opt.ll
@@ -0,0 +1,269 @@
+; RUN: llc -mcpu=diamondrapids %s -mtriple=x86_64 -filetype=obj -o %t.o
+; RUN: llvm-objdump --no-print-imm-hex -dr %t.o | FileCheck %s --check-prefixes=NOAPXREL,CHECK
+
+; RUN: llc -mcpu=diamondrapids %s -mtriple=x86_64 -filetype=obj -o %t.o -x86-enable-apx-for-relocation=true
+; RUN: llvm-objdump --no-print-imm-hex -dr %t.o | FileCheck %s --check-prefixes=APXREL,CHECK
+
+
+; All tests are used to check no R_X86_64_CODE_4_GOTPCRELX relocation type
+; emitted if APX features is disabled for relocation.
+; The first 2 tests are used to check if the register class is not
+; updated/recomputed by register allocator. It's originally updated to non-rex2
+; register class by "Suppress APX for relocation" pass.
+
+
+; CHECK-LABEL: test_regclass_not_updated_by_regalloc_1
+; APXREL: movq (%rip), %r16
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar-0x4
+; NOAPXREL-NOT: R_X86_64_CODE_4_GOTPCRELX gvar-0x4
+; NOAPXREL: movq (%rip), %rdi
+; NOAPXREL-NEXT: R_X86_64_REX_GOTPCRELX gvar-0x4
+
+@gvar = external global [20000 x i8]
+
+define void @test_regclass_not_updated_by_regalloc_1(ptr %ptr1, ptr %0, i32 %int1, i64 %int_sext, i64 %mul.447, i64 %int_sext3, i32 %fetch.2508, i32 %fetch.2513, i32 %mul.442, i64 %int_sext6, i64 %int_sext7, i64 %int_sext8, i1 %cond1, i1 %cond2) {
+alloca_38:
+ %int_sext4 = sext i32 %int1 to i64
+ tail call void @llvm.memset.p0.i64(ptr @gvar, i8 0, i64 20000, i1 false)
+ %div.161 = sdiv i64 %int_sext3, %int_sext
+ %cmp.2 = icmp sgt i64 %div.161, 0
+ %1 = sub i64 %int_sext7, %mul.447
+ br label %loop.41
+
+loop.41: ; preds = %ifmerge.2, %alloca_38
+ br i1 %cmp.2, label %L.53, label %ifmerge.2
+
+L.53: ; preds = %loop.41
+ %2 = getelementptr i8, ptr %ptr1, i64 %int_sext8
+ br label %loop.83
+
+loop.83: ; preds = %loop.83, %L.53
+ %i2.i64.1 = phi i64 [ 0, %L.53 ], [ %nextloop.83, %loop.83 ]
+ %3 = mul i64 %i2.i64.1, %int_sext4
+ %.r275 = add i64 %3, %1
+ %4 = getelementptr float, ptr getelementptr ([20000 x i8], ptr @gvar, i64 0, i64 8000), i64 %.r275
+ %gepload = load float, ptr %2, align 1
+ store float %gepload, ptr %4, align 4
+ %nextloop.83 = add i64 %i2.i64.1, 1
+ br i1 %cond1, label %ifmerge.2, label %loop.83
+
+ifmerge.2: ; preds = %loop.83, %loop.41
+ br i1 %cond2, label %afterloop.41, label %loop.41
+
+afterloop.41: ; preds = %ifmerge.2
+ %mul.469 = mul i32 %mul.442, %fetch.2508
+ %div.172 = mul i32 %fetch.2513, %mul.469
+ %mul.471 = mul i32 %int1, %div.172
+ %int_sext39 = sext i32 %mul.471 to i64
+ %5 = mul i64 %int_sext6, %int_sext39
+ %6 = getelementptr i8, ptr %ptr1, i64 %5
+ %7 = load float, ptr %6, align 1
+ store float %7, ptr null, align 4
+ ret void
+}
+
+declare void @llvm.memset.p0.i64(ptr writeonly captures(none), i8, i64, i1 immarg)
+
+; TODO: update after R_X86_64_CODE_6_GOTPCRELX is supported.
+; CHECK-LABEL: test_regclass_not_updated_by_regalloc_2
+; APXREL: {nf} addq (%rip), %r16, %rcx
+; APXREL-NEXT: R_X86_64_GOTPCREL gvar2-0x4
+; NOAPXREL-NOT: R_X86_64_CODE_4_GOTPCRELX gvar2-0x4
+; NOAPXREL: addq (%rip), %rbx
+; NOAPXREL-NEXT: R_X86_64_REX_GOTPCRELX gvar2-0x4
+
+@gvar2 = external constant [8 x [8 x i32]]
+
+define void @test_regclass_not_updated_by_regalloc_2(ptr %pSrc1, i32 %srcStep1, ptr %pSrc2, i32 %srcStep2, i32 %width, i32 %0, i1 %cmp71.not783, i1 %cmp11.i, ptr %pSrc2.addr.0535.i) {
+entry:
+ %1 = ashr i32 %srcStep2, 1
+ %conv.i = sext i32 %width to i64
+ %conv6.i = and i32 %srcStep1, 1
+ %cmp.i = icmp sgt i32 %srcStep1, 0
+ %idx.ext.i = zext i32 %conv6.i to i64
+ %2 = getelementptr <4 x i64>, ptr @gvar2, i64 %idx.ext.i
+ %idx.ext183.i = sext i32 %1 to i64
+ br i1 %cmp71.not783, label %for.end, label %for.body73.lr.ph
+
+for.body73.lr.ph: ; preds = %entry
+ %3 = load <4 x i64>, ptr %2, align 32
+ %..i = select i1 %cmp11.i, <4 x i64> zeroinitializer, <4 x i64> splat (i64 1)
+ %4 = bitcast <4 x i64> %..i to <8 x i32>
+ %5 = bitcast <4 x i64> %3 to <8 x i32>
+ %. = select i1 %cmp.i, <8 x i32> splat (i32 1), <8 x i32> %4
+ %.833 = select i1 %cmp.i, <8 x i32> %5, <8 x i32> zeroinitializer
+ br i1 %cmp11.i, label %for.end.i, label %for.end
+
+for.end.i: ; preds = %if.end153.i, %for.body73.lr.ph
+ %pSrc2.addr.0535.i5 = phi ptr [ %add.ptr184.i, %if.end153.i ], [ %pSrc2, %for.body73.lr.ph ]
+ %eSum0.0531.i = phi <4 x i64> [ %add.i452.i, %if.end153.i ], [ zeroinitializer, %for.body73.lr.ph ]
+ br i1 %cmp71.not783, label %if.end153.i, label %if.then90.i
+
+if.then90.i: ; preds = %for.end.i
+ %6 = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr null, <8 x i32> %.)
+ %add.i464.i = or <4 x i64> %eSum0.0531.i, zeroinitializer
+ %7 = bitcast <8 x i32> %.833 to <4 x i64>
+ %add.ptr152.i = getelementptr i16, ptr %pSrc2.addr.0535.i5, i64 %conv.i
+ br label %if.end153.i
+
+if.end153.i: ; preds = %if.then90.i, %for.end.i
+ %eSum0.2.i = phi <4 x i64> [ %7, %if.then90.i ], [ %eSum0.0531.i, %for.end.i ]
+ %pLocSrc2.1.i = phi ptr [ %add.ptr152.i, %if.then90.i ], [ %pSrc1, %for.end.i ]
+ %8 = load i16, ptr %pLocSrc2.1.i, align 2
+ %conv165.i = zext i16 %8 to i32
+ %vecinit3.i.i = insertelement <4 x i32> zeroinitializer, i32 %conv165.i, i64 0
+ %9 = bitcast <4 x i32> %vecinit3.i.i to <2 x i64>
+ %shuffle.i503.i = shufflevector <2 x i64> %9, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %add.i452.i = or <4 x i64> %eSum0.2.i, %shuffle.i503.i
+ %add.ptr184.i = getelementptr i16, ptr %pSrc2.addr.0535.i, i64 %idx.ext183.i
+ br label %for.end.i
+
+for.end: ; preds = %for.body73.lr.ph, %entry
+ br label %for.cond29.preheader.i227
+
+for.cond29.preheader.i227: ; preds = %for.end
+ br label %for.body32.i328
+
+for.body32.i328: ; preds = %for.body32.i328, %for.cond29.preheader.i227
+ %w.0524.i329 = phi i32 [ %sub.i381, %for.body32.i328 ], [ 0, %for.cond29.preheader.i227 ]
+ %sub.i381 = or i32 %w.0524.i329, 0
+ %cmp30.i384 = icmp sgt i32 %w.0524.i329, 0
+ br label %for.body32.i328
+}
+
+declare <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>)
+
+
+; The test is used to check MOV64rm instruction with relocation and ADD64rr_ND
+; instruction are not folded to ADD64rm_ND with relocation. The later will emit
+; APX relocation which is not recognized by the builtin linker on released OS.
+
+; CHECK-LABEL: test_mem_fold
+; NOAPXREL-NOT: R_X86_64_CODE_4_GOTPCRELX gvar3-0x4
+; NOAPXREL: movq (%rip), %rbx
+; NOAPXREL-NEXT: R_X86_64_REX_GOTPCRELX gvar3-0x4
+
+@gvar3 = external global [40000 x i8]
+
+define void @test_mem_fold(i32 %fetch.1644, i32 %sub.1142, i32 %mul.455, ptr %dval1, ptr %j1, ptr %j2, <4 x i1> %0, i1 %condloop.41.not, i32 %fetch.1646, i32 %fetch.1647, i32 %sub.1108, i64 %int_sext16, i64 %sub.1114, i1 %condloop.45.not.not, <4 x i1> %1) {
+alloca_28:
+ br label %ifmerge.52
+
+do.body903: ; preds = %ifmerge.2
+ %mul.453 = mul i32 %sub.1108, %fetch.1647
+ %sub.1144.neg = or i32 %mul.455, %fetch.1646
+ %mul.454.neg = mul i32 %sub.1144.neg, %fetch.1644
+ %sub.1147 = sub i32 0, %sub.1142
+ %int_sext36 = sext i32 %mul.453 to i64
+ %int_sext38 = sext i32 %mul.454.neg to i64
+ %add.974 = or i64 %int_sext36, %int_sext38
+ %div.98 = sdiv i64 %add.974, %int_sext16
+ br label %do.body907
+
+do.body907: ; preds = %do.body907, %do.body903
+ %do.count41.0 = phi i64 [ %sub.1173, %do.body907 ], [ %div.98, %do.body903 ]
+ %gvar3.load = load double, ptr @gvar3, align 8
+ store double %gvar3.load, ptr null, align 8
+ call void (...) null(ptr null, ptr null, ptr null, ptr null, ptr %dval1, ptr null, ptr %j1, ptr %j2, ptr null, ptr null, ptr null, ptr null, ptr null, i64 0)
+ store i32 %sub.1147, ptr null, align 4
+ %sub.1173 = or i64 %do.count41.0, 1
+ %rel.314 = icmp sgt i64 %do.count41.0, 0
+ br label %do.body907
+
+ifmerge.52: ; preds = %ifmerge.2, %alloca_28
+ %i1.i64.012 = phi i64 [ 0, %alloca_28 ], [ %sub.1114, %ifmerge.2 ]
+ %2 = getelementptr double, ptr @gvar3, i64 %i1.i64.012
+ br label %loop.45
+
+loop.45: ; preds = %loop.45, %ifmerge.52
+ %3 = getelementptr double, ptr %2, <4 x i64> zeroinitializer
+ %4 = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %3, i32 0, <4 x i1> %0, <4 x double> zeroinitializer)
+ call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %4, <4 x ptr> zeroinitializer, i32 0, <4 x i1> %0)
+ br i1 %condloop.45.not.not, label %loop.45, label %ifmerge.2
+
+ifmerge.2: ; preds = %loop.45
+ br i1 %condloop.41.not, label %do.body903, label %ifmerge.52
+}
+
+declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x double>)
+declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32 immarg, <4 x i1>)
+
+
+; The test is to check no R_X86_64_CODE_4_GOTPCRELX relocation emitted when the
+; register in operand 0 of instruction with relocation is used in the PHI
+; instruction. In PHI elimination pass, PHI instruction is eliminated by
+; inserting COPY instruction. And in the late pass (Machine Copy Propagation
+; pass), the COPY instruction may be optimized and the register in operand 0 of
+; instruction with relocation may be replaced with EGPR.
+
+
+; CHECK-LABEL: test_phi_uses
+; APXREL: addq (%rip), %r16
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar4-0x4
+; APXREL: movq (%rip), %r17
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar5-0x4
+; APXREL: movq (%rip), %r18
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar6-0x4
+; APXREL: movq (%rip), %r19
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar7-0x4
+; APXREL: movq (%rip), %r22
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar8-0x4
+; APXREL: movq (%rip), %r23
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar9-0x4
+; APXREL: movq (%rip), %r24
+; APXREL-NEXT: R_X86_64_CODE_4_GOTPCRELX gvar10-0x4
+; NOAPXREL-NOT: R_X86_64_CODE_4_GOTPCRELX gvar5-0x4
+; NOAPXREL: movq (%rip), %r15
+; NOAPXREL-NEXT: R_X86_64_REX_GOTPCRELX gvar5-0x4
+
+
+@gvar4 = external global [33 x [33 x double]]
+@gvar5 = external global [33 x [33 x float]]
+@gvar6 = external global [33 x [33 x float]]
+@gvar7 = external global [33 x [33 x float]]
+@gvar8 = external global [33 x [33 x float]]
+@gvar9 = external global [33 x [33 x float]]
+@gvar10 = external global [33 x [33 x float]]
+
+define void @test_phi_uses(i64 %i1.i64.0, ptr %0, ptr %1, ptr %2, ptr %3, ptr %in0, ptr %4, ptr %5, i1 %cmp.144) #0 {
+alloca_15:
+ br label %loop.253
+
+loop.253: ; preds = %loop.1500, %alloca_15
+ %i1.i64.01 = phi i64 [ 0, %alloca_15 ], [ %6, %loop.1500 ]
+ %6 = add i64 %i1.i64.01, 1
+ br label %loop.254
+
+loop.254: ; preds = %loop.254, %loop.253
+ %i2.i64.02 = phi i64 [ %13, %loop.254 ], [ 0, %loop.253 ]
+ %7 = getelementptr [33 x [33 x float]], ptr @gvar10, i64 0, i64 %i2.i64.02, i64 %i1.i64.01
+ %gepload368 = load float, ptr %7, align 4
+ store double 0.000000e+00, ptr %0, align 8
+ %8 = getelementptr [33 x [33 x float]], ptr @gvar9, i64 0, i64 %i2.i64.02, i64 %i1.i64.01
+ %gepload369 = load float, ptr %8, align 4
+ store double 0.000000e+00, ptr %1, align 8
+ %9 = getelementptr [33 x [33 x float]], ptr @gvar8, i64 0, i64 %i2.i64.02, i64 %i1.i64.01
+ %gepload371 = load float, ptr %9, align 4
+ store double 0.000000e+00, ptr %2, align 8
+ %10 = getelementptr [33 x [33 x float]], ptr @gvar7, i64 0, i64 %i2.i64.02, i64 %i1.i64.01
+ %gepload373 = load float, ptr %10, align 4
+ %11 = getelementptr [33 x [33 x double]], ptr @gvar4, i64 0, i64 %i2.i64.02, i64 %i1.i64.0
+ store double 0.000000e+00, ptr %11, align 8
+ %12 = getelementptr [33 x [33 x float]], ptr @gvar6, i64 0, i64 %i2.i64.02, i64 %i1.i64.01
+ %gepload375 = load float, ptr %12, align 4
+ store double 0.000000e+00, ptr %3, align 8
+ store double 0.000000e+00, ptr %5, align 8
+ %13 = add i64 %i2.i64.02, 1
+ store double 0.000000e+00, ptr %in0, align 8
+ store double 0.000000e+00, ptr %4, align 8
+ %14 = getelementptr [33 x [33 x float]], ptr @gvar5, i64 0, i64 %i2.i64.02, i64 %i1.i64.01
+ %gepload392 = load float, ptr %14, align 4
+ br i1 %cmp.144, label %loop.1500, label %loop.254
+
+loop.1500: ; preds = %loop.254
+ %15 = getelementptr [33 x [33 x float]], ptr @gvar5, i64 0, i64 0, i64 %i1.i64.0
+ %gepload444 = load float, ptr %15, align 4
+ %16 = fpext float %gepload444 to double
+ store double %16, ptr null, align 8
+ br label %loop.253
+} \ No newline at end of file
diff --git a/llvm/test/CodeGen/X86/apx/reloc.mir b/llvm/test/CodeGen/X86/apx/reloc.mir
index 9009f5b..877549b 100644
--- a/llvm/test/CodeGen/X86/apx/reloc.mir
+++ b/llvm/test/CodeGen/X86/apx/reloc.mir
@@ -57,7 +57,12 @@
ret i32 undef
}
- define i32 @add64rm_nd() {
+ define i32 @add64rm_nd_gotpcrel() {
+ entry:
+ ret i32 undef
+ }
+
+ define i32 @add64rm_nd_gottpoff() {
entry:
ret i32 undef
}
@@ -253,7 +258,28 @@ body: |
# NOAPXREL: %1:gr64_norex2 = XOR64rm %0, $rip, 1, $noreg, target-flags(x86-gottpoff) @i, $noreg, implicit-def $eflags :: (load (s64))
...
---
-name: add64rm_nd
+name: add64rm_nd_gotpcrel
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64 }
+ - { id: 1, class: gr64 }
+ - { id: 2, class: gr32 }
+body: |
+ bb.0.entry:
+ %0:gr64 = MOV64rm $rip, 1, $noreg, @x, $noreg :: (load (s64))
+ %1:gr64 = ADD64rm_ND %0, $rip, 1, $noreg, target-flags(x86-gotpcrel) @i, $noreg, implicit-def dead $eflags :: (load (s64) from got)
+ %2:gr32 = MOV32rm killed %1, 1, $noreg, 0, $fs :: (load (s32))
+ $eax = COPY %2
+ RET 0, $eax
+
+# CHECK: name: add64rm_nd_gotpcrel
+# APXREL: %1:gr64 = ADD64rm_ND %0, $rip, 1, $noreg, target-flags(x86-gotpcrel) @i, $noreg, implicit-def dead $eflags :: (load (s64) from got)
+# NOAPXREL: %3:gr64_norex2 = COPY %0
+# NOAPXREL: %1:gr64_norex2 = ADD64rm %3, $rip, 1, $noreg, target-flags(x86-gotpcrel) @i, $noreg, implicit-def dead $eflags
+...
+---
+name: add64rm_nd_gottpoff
alignment: 16
tracksRegLiveness: true
registers:
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 5bce440..45277ce 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -1,10 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7.0 -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7.0 -verify-machineinstrs -O0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
define void @test1(ptr %ptr, i32 %val1) {
; CHECK-LABEL: test1:
-; CHECK: ## %bb.0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xchgl %esi, (%rdi)
; CHECK-NEXT: retq
store atomic i32 %val1, ptr %ptr seq_cst, align 4
@@ -13,7 +19,7 @@ define void @test1(ptr %ptr, i32 %val1) {
define void @test2(ptr %ptr, i32 %val1) {
; CHECK-LABEL: test2:
-; CHECK: ## %bb.0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, (%rdi)
; CHECK-NEXT: retq
store atomic i32 %val1, ptr %ptr release, align 4
@@ -22,9 +28,12 @@ define void @test2(ptr %ptr, i32 %val1) {
define i32 @test3(ptr %ptr) {
; CHECK-LABEL: test3:
-; CHECK: ## %bb.0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rdi), %eax
; CHECK-NEXT: retq
%val = load atomic i32, ptr %ptr seq_cst, align 4
ret i32 %val
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-O0: {{.*}}
+; CHECK-O3: {{.*}}
diff --git a/llvm/test/CodeGen/X86/avx10_2-cmp.ll b/llvm/test/CodeGen/X86/avx10_2-cmp.ll
index 140a20c..0f90f1a 100644
--- a/llvm/test/CodeGen/X86/avx10_2-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx10_2-cmp.ll
@@ -276,3 +276,24 @@ if.then66: ; preds = %entry
if.end70: ; preds = %entry
ret i32 0
}
+
+define i1 @constrained_fcmp() {
+; X64-LABEL: constrained_fcmp:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; X64-NEXT: vucomxsd %xmm0, %xmm0
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+;
+; X86-LABEL: constrained_fcmp:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; X86-NEXT: vucomxsd %xmm0, %xmm0
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+entry:
+ %0 = tail call i1 @llvm.experimental.constrained.fcmps.f64(double 0.000000e+00, double 0.000000e+00, metadata !"une", metadata !"fpexcept.strict")
+ ret i1 %0
+}
+
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll
index a491986..22929fa4b 100644
--- a/llvm/test/CodeGen/X86/movtopush.ll
+++ b/llvm/test/CodeGen/X86/movtopush.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -mtriple=i686-windows | FileCheck %s -check-prefix=NORMAL
; RUN: llc < %s -mtriple=i686-windows -no-x86-call-frame-opt | FileCheck %s -check-prefix=NOPUSH
; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-uefi | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-pc-linux | FileCheck %s -check-prefix=LINUX
%class.Class = type { i32 }
diff --git a/llvm/test/CodeGen/X86/musttail-tailcc.ll b/llvm/test/CodeGen/X86/musttail-tailcc.ll
index 718de84..fae698d 100644
--- a/llvm/test/CodeGen/X86/musttail-tailcc.ll
+++ b/llvm/test/CodeGen/X86/musttail-tailcc.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-uefi | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X86
; tailcc will turn all of these musttail calls into tail calls.
@@ -12,9 +12,9 @@ define dso_local tailcc i32 @tailcaller(i32 %in1, i32 %in2) nounwind {
; X64: # %bb.0: # %entry
; X64-NEXT: jmp tailcallee # TAILCALL
;
-; X32-LABEL: tailcaller:
-; X32: # %bb.0: # %entry
-; X32-NEXT: jmp tailcallee # TAILCALL
+; X86-LABEL: tailcaller:
+; X86: # %bb.0: # %entry
+; X86-NEXT: jmp tailcallee # TAILCALL
entry:
%tmp11 = musttail call tailcc i32 @tailcallee(i32 %in1, i32 %in2)
ret i32 %tmp11
@@ -27,9 +27,9 @@ define tailcc noalias ptr @noalias_caller() nounwind {
; X64: # %bb.0:
; X64-NEXT: jmp alias_callee # TAILCALL
;
-; X32-LABEL: noalias_caller:
-; X32: # %bb.0:
-; X32-NEXT: jmp alias_callee # TAILCALL
+; X86-LABEL: noalias_caller:
+; X86: # %bb.0:
+; X86-NEXT: jmp alias_callee # TAILCALL
%p = musttail call tailcc ptr @alias_callee()
ret ptr %p
}
@@ -41,9 +41,9 @@ define dso_local tailcc ptr @alias_caller() nounwind {
; X64: # %bb.0:
; X64-NEXT: jmp noalias_callee # TAILCALL
;
-; X32-LABEL: alias_caller:
-; X32: # %bb.0:
-; X32-NEXT: jmp noalias_callee # TAILCALL
+; X86-LABEL: alias_caller:
+; X86: # %bb.0:
+; X86-NEXT: jmp noalias_callee # TAILCALL
%p = musttail call tailcc noalias ptr @noalias_callee()
ret ptr %p
}
@@ -53,18 +53,18 @@ define dso_local tailcc void @void_test(i32, i32, i32, i32) {
; X64: # %bb.0: # %entry
; X64-NEXT: jmp void_test # TAILCALL
;
-; X32-LABEL: void_test:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: .cfi_offset %esi, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: jmp void_test # TAILCALL
+; X86-LABEL: void_test:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: jmp void_test # TAILCALL
entry:
musttail call tailcc void @void_test( i32 %0, i32 %1, i32 %2, i32 %3)
ret void
@@ -75,18 +75,18 @@ define dso_local tailcc i1 @i1test(i32, i32, i32, i32) {
; X64: # %bb.0: # %entry
; X64-NEXT: jmp i1test # TAILCALL
;
-; X32-LABEL: i1test:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: .cfi_offset %esi, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: jmp i1test # TAILCALL
+; X86-LABEL: i1test:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: jmp i1test # TAILCALL
entry:
%4 = musttail call tailcc i1 @i1test( i32 %0, i32 %1, i32 %2, i32 %3)
ret i1 %4
diff --git a/llvm/test/CodeGen/X86/tailcall-tailcc.ll b/llvm/test/CodeGen/X86/tailcall-tailcc.ll
index 648d6b3..adb032a 100644
--- a/llvm/test/CodeGen/X86/tailcall-tailcc.ll
+++ b/llvm/test/CodeGen/X86/tailcall-tailcc.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-windows-msvc | FileCheck %s -check-prefix=UEFI64
; RUN: llc < %s -mtriple=x86_64-uefi | FileCheck %s -check-prefix=UEFI64
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X86
; With -tailcallopt, CodeGen guarantees a tail call optimization
; for all of these.
@@ -26,15 +26,15 @@ define dso_local tailcc i32 @tailcaller(i32 %in1, i32 %in2) nounwind {
; UEFI64-NEXT: addq $40, %rsp
; UEFI64-NEXT: jmp tailcallee # TAILCALL
;
-; X32-LABEL: tailcaller:
-; X32: # %bb.0: # %entry
-; X32-NEXT: subl $16, %esp
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT: addl $8, %esp
-; X32-NEXT: jmp tailcallee # TAILCALL
+; X86-LABEL: tailcaller:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: jmp tailcallee # TAILCALL
entry:
%tmp11 = tail call tailcc i32 @tailcallee(i32 %in1, i32 %in2, i32 %in1, i32 %in2)
ret i32 %tmp11
@@ -53,9 +53,9 @@ define tailcc noalias ptr @noalias_caller() nounwind {
; UEFI64: # %bb.0:
; UEFI64-NEXT: jmp alias_callee # TAILCALL
;
-; X32-LABEL: noalias_caller:
-; X32: # %bb.0:
-; X32-NEXT: jmp alias_callee # TAILCALL
+; X86-LABEL: noalias_caller:
+; X86: # %bb.0:
+; X86-NEXT: jmp alias_callee # TAILCALL
%p = tail call tailcc ptr @alias_callee()
ret ptr %p
}
@@ -73,9 +73,9 @@ define dso_local tailcc ptr @alias_caller() nounwind {
; UEFI64: # %bb.0:
; UEFI64-NEXT: jmp noalias_callee # TAILCALL
;
-; X32-LABEL: alias_caller:
-; X32: # %bb.0:
-; X32-NEXT: jmp noalias_callee # TAILCALL
+; X86-LABEL: alias_caller:
+; X86: # %bb.0:
+; X86-NEXT: jmp noalias_callee # TAILCALL
%p = tail call tailcc noalias ptr @noalias_callee()
ret ptr %p
}
@@ -93,9 +93,9 @@ define dso_local tailcc i32 @ret_undef() nounwind {
; UEFI64: # %bb.0:
; UEFI64-NEXT: jmp i32_callee # TAILCALL
;
-; X32-LABEL: ret_undef:
-; X32: # %bb.0:
-; X32-NEXT: jmp i32_callee # TAILCALL
+; X86-LABEL: ret_undef:
+; X86: # %bb.0:
+; X86-NEXT: jmp i32_callee # TAILCALL
%p = tail call tailcc i32 @i32_callee()
ret i32 undef
}
@@ -113,9 +113,9 @@ define dso_local tailcc i32 @noret() nounwind {
; UEFI64: # %bb.0:
; UEFI64-NEXT: jmp does_not_return # TAILCALL
;
-; X32-LABEL: noret:
-; X32: # %bb.0:
-; X32-NEXT: jmp does_not_return # TAILCALL
+; X86-LABEL: noret:
+; X86: # %bb.0:
+; X86-NEXT: jmp does_not_return # TAILCALL
tail call tailcc void @does_not_return()
unreachable
}
@@ -139,22 +139,22 @@ define dso_local tailcc void @void_test(i32, i32, i32, i32) {
; UEFI64-NEXT: .seh_endepilogue
; UEFI64-NEXT: jmp void_test # TAILCALL
;
-; X32-LABEL: void_test:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: subl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 16
-; X32-NEXT: .cfi_offset %esi, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT: addl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: jmp void_test # TAILCALL
+; X86-LABEL: void_test:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: .cfi_def_cfa_offset 16
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: jmp void_test # TAILCALL
entry:
tail call tailcc void @void_test( i32 %0, i32 %1, i32 %2, i32 %3)
ret void
@@ -179,22 +179,22 @@ define dso_local tailcc i1 @i1test(i32, i32, i32, i32) {
; UEFI64-NEXT: .seh_endepilogue
; UEFI64-NEXT: jmp i1test # TAILCALL
;
-; X32-LABEL: i1test:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: subl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 16
-; X32-NEXT: .cfi_offset %esi, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT: addl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: jmp i1test # TAILCALL
+; X86-LABEL: i1test:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: .cfi_def_cfa_offset 16
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: jmp i1test # TAILCALL
entry:
%4 = tail call tailcc i1 @i1test( i32 %0, i32 %1, i32 %2, i32 %3)
ret i1 %4
diff --git a/llvm/test/CodeGen/X86/tailcc-fastcc.ll b/llvm/test/CodeGen/X86/tailcc-fastcc.ll
index 6ede167..13cb577 100644
--- a/llvm/test/CodeGen/X86/tailcc-fastcc.ll
+++ b/llvm/test/CodeGen/X86/tailcc-fastcc.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -tailcallopt < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=X64
; RUN: llc -tailcallopt < %s -mtriple=x86_64-uefi | FileCheck %s -check-prefix=UEFI64
-; RUN: llc -tailcallopt < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X32
+; RUN: llc -tailcallopt < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X86
; llc -tailcallopt should not enable tail calls from fastcc to tailcc or vice versa
@@ -24,12 +24,12 @@ define fastcc i32 @tailcaller1(i32 %in1, i32 %in2) nounwind {
; UEFI64-NEXT: callq tailcallee1
; UEFI64-NEXT: retq $40
;
-; X32-LABEL: tailcaller1:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %edx
-; X32-NEXT: pushl %ecx
-; X32-NEXT: calll tailcallee1@PLT
-; X32-NEXT: retl
+; X86-LABEL: tailcaller1:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edx
+; X86-NEXT: pushl %ecx
+; X86-NEXT: calll tailcallee1@PLT
+; X86-NEXT: retl
entry:
%tmp11 = tail call tailcc i32 @tailcallee1(i32 %in1, i32 %in2, i32 %in1, i32 %in2)
ret i32 %tmp11
@@ -54,12 +54,12 @@ define tailcc i32 @tailcaller2(i32 %in1, i32 %in2) nounwind {
; UEFI64-NEXT: callq tailcallee2
; UEFI64-NEXT: retq $40
;
-; X32-LABEL: tailcaller2:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %edx
-; X32-NEXT: pushl %ecx
-; X32-NEXT: calll tailcallee2@PLT
-; X32-NEXT: retl
+; X86-LABEL: tailcaller2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edx
+; X86-NEXT: pushl %ecx
+; X86-NEXT: calll tailcallee2@PLT
+; X86-NEXT: retl
entry:
%tmp11 = tail call fastcc i32 @tailcallee2(i32 %in1, i32 %in2, i32 %in1, i32 %in2)
ret i32 %tmp11
diff --git a/llvm/test/CodeGen/X86/win64_eh.ll b/llvm/test/CodeGen/X86/win64_eh.ll
index 67088b4..63aa9fc 100644
--- a/llvm/test/CodeGen/X86/win64_eh.ll
+++ b/llvm/test/CodeGen/X86/win64_eh.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-pc-windows-itanium | FileCheck %s -check-prefix=WIN64 -check-prefix=NORM
+; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-uefi | FileCheck %s -check-prefix=WIN64 -check-prefix=NORM
; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64 -check-prefix=NORM
; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-pc-mingw32 -mcpu=atom | FileCheck %s -check-prefix=WIN64 -check-prefix=ATOM
diff --git a/llvm/test/CodeGen/X86/win64_frame.ll b/llvm/test/CodeGen/X86/win64_frame.ll
index 9cb3ba8..c4b36c5 100644
--- a/llvm/test/CodeGen/X86/win64_frame.ll
+++ b/llvm/test/CodeGen/X86/win64_frame.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-pc-win32 -mattr=+sahf | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-uefi | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-uefi -mattr=+sahf | FileCheck %s
define i32 @f1(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) "frame-pointer"="all" {
; CHECK-LABEL: f1:
diff --git a/llvm/test/CodeGen/X86/win_chkstk.ll b/llvm/test/CodeGen/X86/win_chkstk.ll
index 5b9c35efd..5a2809ed 100644
--- a/llvm/test/CodeGen/X86/win_chkstk.ll
+++ b/llvm/test/CodeGen/X86/win_chkstk.ll
@@ -1,6 +1,8 @@
; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN_X32
; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN_X64
; RUN: llc < %s -mtriple=x86_64-pc-win32 -code-model=large | FileCheck %s -check-prefix=WIN64_LARGE
+; RUN: llc < %s -mtriple=x86_64-uefi | FileCheck %s -check-prefix=WIN_X64
+; RUN: llc < %s -mtriple=x86_64-uefi -code-model=large | FileCheck %s -check-prefix=WIN64_LARGE
; RUN: llc < %s -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X32
; RUN: llc < %s -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X64
; RUN: llc < %s -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
diff --git a/llvm/test/DebugInfo/Generic/debug-names-asm-label.ll b/llvm/test/DebugInfo/Generic/debug-names-asm-label.ll
new file mode 100644
index 0000000..db3665d
--- /dev/null
+++ b/llvm/test/DebugInfo/Generic/debug-names-asm-label.ll
@@ -0,0 +1,54 @@
+; Tests the mangling escape prefix gets stripped from the linkage name.
+;
+; RUN: %llc_dwarf -accel-tables=Dwarf -dwarf-linkage-names=All -filetype=obj -o %t < %s
+;
+; RUN: llvm-dwarfdump -debug-info -debug-names %t | FileCheck %s
+; RUN: llvm-dwarfdump -debug-names -verify %t | FileCheck --check-prefix=VERIFY %s
+
+; CHECK: .debug_info contents:
+; CHECK: DW_AT_linkage_name ("bar")
+; CHECK: .debug_names contents:
+; CHECK: String: {{.*}} "bar"
+
+; VERIFY: No errors.
+
+; Input generated from the following C++ code using
+; clang -g -S -emit-llvm -target aarch64-apple-macos
+
+; void foo() asm("bar");
+; void foo() {}
+;
+; void g() { foo(); }
+
+define void @"\01bar"() !dbg !9 {
+entry:
+ ret void, !dbg !13
+}
+
+define void @_Z1gv() !dbg !14 {
+entry:
+ call void @"\01bar"(), !dbg !15
+ ret void, !dbg !16
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5, !6, !7}
+!llvm.ident = !{!8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 21.0.0git", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: Apple, sysroot: "/")
+!1 = !DIFile(filename: "asm.cpp", directory: "/tmp", checksumkind: CSK_MD5, checksum: "d053f9249cc5548d446ceb58411ad625")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 8, !"PIC Level", i32 2}
+!6 = !{i32 7, !"uwtable", i32 1}
+!7 = !{i32 7, !"frame-pointer", i32 1}
+!8 = !{!"clang version 21.0.0git"}
+!9 = distinct !DISubprogram(name: "foo", linkageName: "\01bar", scope: !10, file: !10, line: 2, type: !11, scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0)
+!10 = !DIFile(filename: "asm.cpp", directory: "/tmp", checksumkind: CSK_MD5, checksum: "d053f9249cc5548d446ceb58411ad625")
+!11 = !DISubroutineType(types: !12)
+!12 = !{null}
+!13 = !DILocation(line: 2, column: 13, scope: !9)
+!14 = distinct !DISubprogram(name: "g", linkageName: "_Z1gv", scope: !10, file: !10, line: 4, type: !11, scopeLine: 4, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0)
+!15 = !DILocation(line: 4, column: 12, scope: !14)
+!16 = !DILocation(line: 4, column: 19, scope: !14)
diff --git a/llvm/test/DebugInfo/Generic/multi-variant.ll b/llvm/test/DebugInfo/Generic/multi-variant.ll
new file mode 100644
index 0000000..1c680b3
--- /dev/null
+++ b/llvm/test/DebugInfo/Generic/multi-variant.ll
@@ -0,0 +1,74 @@
+; RUN: %llc_dwarf -O0 -filetype=obj < %s > %t
+; RUN: llvm-dwarfdump -v -debug-info %t | FileCheck %s
+
+; Check for a variant part where a variant has multiple members.
+
+; CHECK: DW_AT_name [DW_FORM_str{{[a-z]+}}] ({{.*}} = "Discr")
+; CHECK: DW_TAG_variant_part
+; CHECK-NOT: TAG
+; CHECK: DW_AT_discr [DW_FORM_ref4] (cu + {{0x[0-9a-fA-F]+}} => {[[OFFSET:0x[0-9a-fA-F]+]]})
+; CHECK: DW_TAG_variant
+; CHECK: DW_AT_discr_value [DW_FORM_data1] (0x4a)
+; CHECK: DW_TAG_member
+; CHECK: DW_AT_name [DW_FORM_str{{[a-z]+}}] ({{.*}} = "field0")
+; CHECK: DW_AT_type
+; CHECK: DW_AT_alignment
+; CHECK: DW_AT_data_member_location [DW_FORM_data1] (0x00)
+; CHECK: DW_TAG_member
+; CHECK: DW_AT_name [DW_FORM_str{{[a-z]+}}] ({{.*}} = "field1")
+; CHECK: DW_AT_type
+; CHECK: DW_AT_alignment
+; CHECK: DW_AT_data_member_location [DW_FORM_data1] (0x08)
+; CHECK: DW_TAG_variant
+; CHECK: DW_AT_discr_value [DW_FORM_data1] (0x4b)
+; CHECK: DW_TAG_member
+; CHECK: DW_AT_name [DW_FORM_str{{[a-z]+}}] ({{.*}} = "field2")
+; CHECK: DW_AT_type
+; CHECK: DW_AT_alignment
+; CHECK: DW_AT_data_member_location [DW_FORM_data1] (0x00)
+
+%F = type { [0 x i8], ptr, [8 x i8] }
+
+define internal void @_ZN2e34main17h934ff72f9a38d4bbE() unnamed_addr #0 !dbg !5 {
+start:
+ %qq = alloca %F, align 8
+ call void @llvm.dbg.declare(metadata ptr %qq, metadata !10, metadata !24), !dbg !25
+ store ptr null, ptr %qq, !dbg !25
+ ret void, !dbg !26
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
+
+attributes #0 = { nounwind uwtable }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.dbg.cu = !{!2}
+
+!0 = !{i32 1, !"PIE Level", i32 2}
+!1 = !{i32 2, !"Debug Info Version", i32 3}
+!2 = distinct !DICompileUnit(language: DW_LANG_Ada95, file: !3, producer: "gnat-llvm", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4)
+!3 = !DIFile(filename: "e3.rs", directory: "/home/tromey/Ada")
+!4 = !{}
+!5 = distinct !DISubprogram(name: "main", linkageName: "_ZN2e34mainE", scope: !6, file: !3, line: 2, type: !8, scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagMainSubprogram, isOptimized: false, unit: !2, templateParams: !4, retainedNodes: !4)
+!6 = !DINamespace(name: "e3", scope: null)
+!7 = !DIFile(filename: "<unknown>", directory: "")
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocalVariable(name: "qq", scope: !11, file: !3, line: 3, type: !12, align: 64)
+!11 = distinct !DILexicalBlock(scope: !5, file: !3, line: 3, column: 4)
+!12 = !DICompositeType(tag: DW_TAG_structure_type, name: "F", scope: !6, file: !7, size: 128, align: 64, elements: !13, identifier: "7ce1efff6b82281ab9ceb730566e7e20")
+!13 = !{!14, !15}
+!14 = !DIDerivedType(tag: DW_TAG_member, name: "Discr", scope: !12, file: !7, baseType: !23, size: 64, align: 64)
+!15 = !DICompositeType(tag: DW_TAG_variant_part, scope: !12, file: !7, size: 128, align: 64, elements: !16, identifier: "7ce1efff6b82281ab9ceb730566e7e20", discriminator: !14)
+!16 = !{!17, !22}
+!17 = !DIDerivedType(tag: DW_TAG_member, scope: !15, file: !7, baseType: !18, size: 128, align: 64, extraData: i32 74)
+!18 = !DICompositeType(tag: DW_TAG_variant, scope: !15, file: !7, size: 128, align: 64, elements: !19)
+!19 = !{!20, !21}
+!20 = !DIDerivedType(tag: DW_TAG_member, name: "field0", scope: !18, file: !7, baseType: !23, size: 64, align: 64, offset: 0)
+!21 = !DIDerivedType(tag: DW_TAG_member, name: "field1", scope: !18, file: !7, baseType: !23, size: 64, align: 64, offset: 64)
+!22 = !DIDerivedType(tag: DW_TAG_member, name: "field2", scope: !15, file: !7, baseType: !23, size: 64, align: 64, offset: 0, extraData: i32 75)
+!23 = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)
+!24 = !DIExpression()
+!25 = !DILocation(line: 3, scope: !11)
+!26 = !DILocation(line: 4, scope: !5)
diff --git a/llvm/test/MC/Disassembler/Sparc/sparc-ua-osa.txt b/llvm/test/MC/Disassembler/Sparc/sparc-ua-osa.txt
new file mode 100644
index 0000000..bc32e7d
--- /dev/null
+++ b/llvm/test/MC/Disassembler/Sparc/sparc-ua-osa.txt
@@ -0,0 +1,31 @@
+# RUN: llvm-mc --disassemble %s -triple=sparcv9-unknown-linux -mattr=+ua2005,+ua2007 | FileCheck %s
+
+## UA 2005 instructions.
+
+# CHECK: allclean
+0x85,0x88,0x00,0x00
+# CHECK: invalw
+0x8b,0x88,0x00,0x00
+# CHECK: otherw
+0x87,0x88,0x00,0x00
+# CHECK: normalw
+0x89,0x88,0x00,0x00
+
+## UA 2007 instructions.
+
+# CHECK: fmadds %f1, %f3, %f5, %f7
+0x8f,0xb8,0x4a,0x23
+# CHECK: fmaddd %f0, %f2, %f4, %f6
+0x8d,0xb8,0x08,0x42
+# CHECK: fmsubs %f1, %f3, %f5, %f7
+0x8f,0xb8,0x4a,0xa3
+# CHECK: fmsubd %f0, %f2, %f4, %f6
+0x8d,0xb8,0x08,0xc2
+# CHECK: fnmadds %f1, %f3, %f5, %f7
+0x8f,0xb8,0x4b,0xa3
+# CHECK: fnmaddd %f0, %f2, %f4, %f6
+0x8d,0xb8,0x09,0xc2
+# CHECK: fnmsubs %f1, %f3, %f5, %f7
+0x8f,0xb8,0x4b,0x23
+# CHECK: fnmsubd %f0, %f2, %f4, %f6
+0x8d,0xb8,0x09,0x42
diff --git a/llvm/test/MC/RISCV/xandesvpackfph-valid.s b/llvm/test/MC/RISCV/xandesvpackfph-valid.s
new file mode 100644
index 0000000..955e713
--- /dev/null
+++ b/llvm/test/MC/RISCV/xandesvpackfph-valid.s
@@ -0,0 +1,39 @@
+# XAndesVPackFPH - Andes Vector Packed FP16 Extension
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+xandesvpackfph -show-encoding \
+# RUN: | FileCheck -check-prefixes=CHECK-ASM %s
+# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+xandesvpackfph < %s \
+# RUN: | llvm-objdump --mattr=+xandesvpackfph -M no-aliases -d -r - \
+# RUN: | FileCheck -check-prefixes=CHECK-OBJ %s
+# RUN: not llvm-mc -triple=riscv32 -show-encoding %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+xandesvpackfph -show-encoding \
+# RUN: | FileCheck -check-prefixes=CHECK-ASM %s
+# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+xandesvpackfph < %s \
+# RUN: | llvm-objdump --mattr=+xandesvpackfph -M no-aliases -d -r - \
+# RUN: | FileCheck -check-prefixes=CHECK-OBJ %s
+# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+# CHECK-OBJ: nds.vfpmadt.vf v8, fa0, v10
+# CHECK-ASM: nds.vfpmadt.vf v8, fa0, v10
+# CHECK-ASM: encoding: [0x5b,0x44,0xa5,0x0a]
+# CHECK-ERROR: instruction requires the following: 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension){{$}}
+nds.vfpmadt.vf v8, fa0, v10
+
+# CHECK-OBJ: nds.vfpmadt.vf v8, fa0, v10, v0.t
+# CHECK-ASM: nds.vfpmadt.vf v8, fa0, v10, v0.t
+# CHECK-ASM: encoding: [0x5b,0x44,0xa5,0x08]
+# CHECK-ERROR: instruction requires the following: 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension){{$}}
+nds.vfpmadt.vf v8, fa0, v10, v0.t
+
+# CHECK-OBJ: nds.vfpmadb.vf v8, fa0, v10
+# CHECK-ASM: nds.vfpmadb.vf v8, fa0, v10
+# CHECK-ASM: encoding: [0x5b,0x44,0xa5,0x0e]
+# CHECK-ERROR: instruction requires the following: 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension){{$}}
+nds.vfpmadb.vf v8, fa0, v10
+
+# CHECK-OBJ: nds.vfpmadb.vf v8, fa0, v10, v0.t
+# CHECK-ASM: nds.vfpmadb.vf v8, fa0, v10, v0.t
+# CHECK-ASM: encoding: [0x5b,0x44,0xa5,0x0c]
+# CHECK-ERROR: instruction requires the following: 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension){{$}}
+nds.vfpmadb.vf v8, fa0, v10, v0.t
diff --git a/llvm/test/MC/Sparc/sparc-ua2005.s b/llvm/test/MC/Sparc/sparc-ua2005.s
new file mode 100644
index 0000000..b07c99a
--- /dev/null
+++ b/llvm/test/MC/Sparc/sparc-ua2005.s
@@ -0,0 +1,17 @@
+! RUN: not llvm-mc %s -triple=sparcv9 -show-encoding 2>&1 | FileCheck %s --check-prefixes=NO-UA2005 --implicit-check-not=error:
+! RUN: llvm-mc %s -triple=sparcv9 -mattr=+ua2005 -show-encoding | FileCheck %s --check-prefixes=UA2005
+
+!! UA 2005 instructions.
+
+! NO-UA2005: error: instruction requires a CPU feature not currently enabled
+! UA2005: allclean ! encoding: [0x85,0x88,0x00,0x00]
+allclean
+! NO-UA2005: error: instruction requires a CPU feature not currently enabled
+! UA2005: invalw ! encoding: [0x8b,0x88,0x00,0x00]
+invalw
+! NO-UA2005: error: instruction requires a CPU feature not currently enabled
+! UA2005: otherw ! encoding: [0x87,0x88,0x00,0x00]
+otherw
+! NO-UA2005: error: instruction requires a CPU feature not currently enabled
+! UA2005: normalw ! encoding: [0x89,0x88,0x00,0x00]
+normalw
diff --git a/llvm/test/MC/Sparc/sparc-ua2007.s b/llvm/test/MC/Sparc/sparc-ua2007.s
new file mode 100644
index 0000000..6b41f92
--- /dev/null
+++ b/llvm/test/MC/Sparc/sparc-ua2007.s
@@ -0,0 +1,30 @@
+! RUN: not llvm-mc %s -triple=sparcv9 -show-encoding 2>&1 | FileCheck %s --check-prefixes=NO-UA2007 --implicit-check-not=error:
+! RUN: llvm-mc %s -triple=sparcv9 -mattr=+ua2007 -show-encoding | FileCheck %s --check-prefixes=UA2007
+
+!! UA 2007 instructions.
+
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fmadds %f1, %f3, %f5, %f7 ! encoding: [0x8f,0xb8,0x4a,0x23]
+fmadds %f1, %f3, %f5, %f7
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fmaddd %f0, %f2, %f4, %f6 ! encoding: [0x8d,0xb8,0x08,0x42]
+fmaddd %f0, %f2, %f4, %f6
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fmsubs %f1, %f3, %f5, %f7 ! encoding: [0x8f,0xb8,0x4a,0xa3]
+fmsubs %f1, %f3, %f5, %f7
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fmsubd %f0, %f2, %f4, %f6 ! encoding: [0x8d,0xb8,0x08,0xc2]
+fmsubd %f0, %f2, %f4, %f6
+
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fnmadds %f1, %f3, %f5, %f7 ! encoding: [0x8f,0xb8,0x4b,0xa3]
+fnmadds %f1, %f3, %f5, %f7
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fnmaddd %f0, %f2, %f4, %f6 ! encoding: [0x8d,0xb8,0x09,0xc2]
+fnmaddd %f0, %f2, %f4, %f6
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fnmsubs %f1, %f3, %f5, %f7 ! encoding: [0x8f,0xb8,0x4b,0x23]
+fnmsubs %f1, %f3, %f5, %f7
+! NO-UA2007: error: instruction requires a CPU feature not currently enabled
+! UA2007: fnmsubd %f0, %f2, %f4, %f6 ! encoding: [0x8d,0xb8,0x09,0x42]
+fnmsubd %f0, %f2, %f4, %f6
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
index 6981b43..89c73c2 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
@@ -1852,300 +1852,6 @@ define void @exp_compr_disabled_inputs_to_undef(<2 x half> %xy, <2 x half> %zw)
}
; --------------------------------------------------------------------
-; llvm.amdgcn.fmed3
-; --------------------------------------------------------------------
-
-declare float @llvm.amdgcn.fmed3.f32(float, float, float) nounwind readnone
-
-define float @fmed3_f32(float %x, float %y, float %z) {
-; CHECK-LABEL: @fmed3_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float %z)
- ret float %med3
-}
-
-define float @fmed3_canonicalize_x_c0_c1_f32(float %x) {
-; CHECK-LABEL: @fmed3_canonicalize_x_c0_c1_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float 0.000000e+00, float 1.000000e+00)
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0.0, float 1.0)
- ret float %med3
-}
-
-define float @fmed3_canonicalize_c0_x_c1_f32(float %x) {
-; CHECK-LABEL: @fmed3_canonicalize_c0_x_c1_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float 0.000000e+00, float 1.000000e+00)
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %x, float 1.0)
- ret float %med3
-}
-
-define float @fmed3_canonicalize_c0_c1_x_f32(float %x) {
-; CHECK-LABEL: @fmed3_canonicalize_c0_c1_x_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float 0.000000e+00, float 1.000000e+00)
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float %x)
- ret float %med3
-}
-
-define float @fmed3_canonicalize_x_y_c_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_canonicalize_x_y_c_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float [[Y:%.*]], float 1.000000e+00)
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 1.0)
- ret float %med3
-}
-
-define float @fmed3_canonicalize_x_c_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_canonicalize_x_c_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float [[Y:%.*]], float 1.000000e+00)
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 1.0, float %y)
- ret float %med3
-}
-
-define float @fmed3_canonicalize_c_x_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_canonicalize_c_x_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X:%.*]], float [[Y:%.*]], float 1.000000e+00)
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 1.0, float %x, float %y)
- ret float %med3
-}
-
-define float @fmed3_undef_x_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_undef_x_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float undef, float %x, float %y)
- ret float %med3
-}
-
-define float @fmed3_fmf_undef_x_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_fmf_undef_x_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call nnan float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call nnan float @llvm.amdgcn.fmed3.f32(float undef, float %x, float %y)
- ret float %med3
-}
-
-define float @fmed3_x_undef_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_x_undef_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float undef, float %y)
- ret float %med3
-}
-
-define float @fmed3_x_y_undef_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_x_y_undef_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float undef)
- ret float %med3
-}
-
-define float @fmed3_qnan0_x_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_qnan0_x_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000000000000, float %x, float %y)
- ret float %med3
-}
-
-define float @fmed3_x_qnan0_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_x_qnan0_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8000000000000, float %y)
- ret float %med3
-}
-
-define float @fmed3_x_y_qnan0_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_x_y_qnan0_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF8000000000000)
- ret float %med3
-}
-
-define float @fmed3_qnan1_x_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_qnan1_x_y_f32(
-; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: ret float [[MED3]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000100000000, float %x, float %y)
- ret float %med3
-}
-
-; This can return any of the qnans.
-define float @fmed3_qnan0_qnan1_qnan2_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_qnan0_qnan1_qnan2_f32(
-; CHECK-NEXT: ret float 0x7FF8030000000000
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000100000000, float 0x7FF8002000000000, float 0x7FF8030000000000)
- ret float %med3
-}
-
-define float @fmed3_constant_src0_0_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_constant_src0_0_f32(
-; CHECK-NEXT: ret float 5.000000e-01
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.5, float -1.0, float 4.0)
- ret float %med3
-}
-
-define float @fmed3_constant_src0_1_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_constant_src0_1_f32(
-; CHECK-NEXT: ret float 5.000000e-01
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.5, float 4.0, float -1.0)
- ret float %med3
-}
-
-define float @fmed3_constant_src1_0_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_constant_src1_0_f32(
-; CHECK-NEXT: ret float 5.000000e-01
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 0.5, float 4.0)
- ret float %med3
-}
-
-define float @fmed3_constant_src1_1_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_constant_src1_1_f32(
-; CHECK-NEXT: ret float 5.000000e-01
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 4.0, float 0.5, float -1.0)
- ret float %med3
-}
-
-define float @fmed3_constant_src2_0_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_constant_src2_0_f32(
-; CHECK-NEXT: ret float 5.000000e-01
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 4.0, float 0.5)
- ret float %med3
-}
-
-define float @fmed3_constant_src2_1_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_constant_src2_1_f32(
-; CHECK-NEXT: ret float 5.000000e-01
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 4.0, float -1.0, float 0.5)
- ret float %med3
-}
-
-define float @fmed3_x_qnan0_qnan1_f32(float %x) {
-; CHECK-LABEL: @fmed3_x_qnan0_qnan1_f32(
-; CHECK-NEXT: ret float [[X:%.*]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8001000000000, float 0x7FF8002000000000)
- ret float %med3
-}
-
-define float @fmed3_qnan0_x_qnan1_f32(float %x) {
-; CHECK-LABEL: @fmed3_qnan0_x_qnan1_f32(
-; CHECK-NEXT: ret float [[X:%.*]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float %x, float 0x7FF8002000000000)
- ret float %med3
-}
-
-define float @fmed3_qnan0_qnan1_x_f32(float %x) {
-; CHECK-LABEL: @fmed3_qnan0_qnan1_x_f32(
-; CHECK-NEXT: ret float [[X:%.*]]
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float 0x7FF8002000000000, float %x)
- ret float %med3
-}
-
-define float @fmed3_nan_0_1_f32() {
-; CHECK-LABEL: @fmed3_nan_0_1_f32(
-; CHECK-NEXT: ret float 0.000000e+00
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float 0.0, float 1.0)
- ret float %med3
-}
-
-define float @fmed3_0_nan_1_f32() {
-; CHECK-LABEL: @fmed3_0_nan_1_f32(
-; CHECK-NEXT: ret float 0.000000e+00
-;
- %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 0x7FF8001000000000, float 1.0)
- ret float %med
-}
-
-define float @fmed3_0_1_nan_f32() {
-; CHECK-LABEL: @fmed3_0_1_nan_f32(
-; CHECK-NEXT: ret float 1.000000e+00
-;
- %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float 0x7FF8001000000000)
- ret float %med
-}
-
-define float @fmed3_undef_0_1_f32() {
-; CHECK-LABEL: @fmed3_undef_0_1_f32(
-; CHECK-NEXT: ret float 0.000000e+00
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float undef, float 0.0, float 1.0)
- ret float %med3
-}
-
-define float @fmed3_0_undef_1_f32() {
-; CHECK-LABEL: @fmed3_0_undef_1_f32(
-; CHECK-NEXT: ret float 0.000000e+00
-;
- %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float undef, float 1.0)
- ret float %med
-}
-
-define float @fmed3_0_1_undef_f32() {
-; CHECK-LABEL: @fmed3_0_1_undef_f32(
-; CHECK-NEXT: ret float 1.000000e+00
-;
- %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float undef)
- ret float %med
-}
-
-define float @fmed3_poison_x_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_poison_x_y_f32(
-; CHECK-NEXT: ret float poison
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float poison, float %x, float %y)
- ret float %med3
-}
-
-define float @fmed3_x_poison_y_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_x_poison_y_f32(
-; CHECK-NEXT: ret float poison
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float poison, float %y)
- ret float %med3
-}
-
-define float @fmed3_x_y_poison_f32(float %x, float %y) {
-; CHECK-LABEL: @fmed3_x_y_poison_f32(
-; CHECK-NEXT: ret float poison
-;
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float poison)
- ret float %med3
-}
-
-; --------------------------------------------------------------------
; llvm.amdgcn.icmp
; --------------------------------------------------------------------
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/fmed3-fpext-fold.ll b/llvm/test/Transforms/InstCombine/AMDGPU/fmed3-fpext-fold.ll
new file mode 100644
index 0000000..66011ad1
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/fmed3-fpext-fold.ll
@@ -0,0 +1,642 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; Unknown/default target
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=instcombine < %s | FileCheck -check-prefixes=NO-FMED3F16,UNKNOWN %s
+
+; Known target, no med3_f16
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=instcombine < %s | FileCheck -check-prefixes=NO-FMED3F16,GFX8 %s
+
+; Has med3_f16
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=instcombine < %s | FileCheck -check-prefixes=GFX9 %s
+
+
+declare float @llvm.fabs.f32(float) #0
+declare half @llvm.fabs.f16(half) #0
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #0
+
+define float @fmed3_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1:[0-9]+]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1:[0-9]+]] {
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG1]], half [[ARG2]])
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_flags(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_flags
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call nsz float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_flags
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[MED31:%.*]] = call nsz half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG1]], half [[ARG2]])
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call nsz float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_k0(half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k0
+; NO-FMED3F16-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float [[ARG2_EXT]], float 2.000000e+00)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k0
+; GFX9-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG1]], half [[ARG2]], half 0xH4000)
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 2.0, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_k1(half %arg0, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k1
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG2_EXT]], float 2.000000e+00)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k1
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG2]], half 0xH4000)
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float 2.0, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_k2(half %arg0, half %arg1) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k2
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float 2.000000e+00)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k2
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG1]], half 0xH4000)
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float 2.0)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_k0_k1(half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k0_k1
+; NO-FMED3F16-SAME: (half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG2_EXT]], float 0.000000e+00, float 1.600000e+01)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k0_k1
+; GFX9-SAME: (half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG2]], half 0xH0000, half 0xH4C00)
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 16.0, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_k0_k2(half %arg1) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k0_k2
+; NO-FMED3F16-SAME: (half [[ARG1:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float 0.000000e+00, float 2.000000e+00)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k0_k2
+; GFX9-SAME: (half [[ARG1:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG1]], half 0xH0000, half 0xH4000)
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg1.ext = fpext half %arg1 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %arg1.ext, float 2.0)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_fabs(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_fabs
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; NO-FMED3F16-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; NO-FMED3F16-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[FABS_ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[FABS_ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[FABS_ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_fabs
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; GFX9-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; GFX9-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[FABS_ARG0]], half [[FABS_ARG1]], half [[FABS_ARG2]])
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %fabs.arg0 = call half @llvm.fabs.f16(half %arg0)
+ %fabs.arg1 = call half @llvm.fabs.f16(half %arg1)
+ %fabs.arg2 = call half @llvm.fabs.f16(half %arg2)
+ %arg0.ext = fpext half %fabs.arg0 to float
+ %arg1.ext = fpext half %fabs.arg1 to float
+ %arg2.ext = fpext half %fabs.arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_fabs_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_fabs_f32_fpext_f16
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; NO-FMED3F16-NEXT: [[FABS_EXT_ARG0:%.*]] = fpext half [[TMP1]] to float
+; NO-FMED3F16-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; NO-FMED3F16-NEXT: [[FABS_EXT_ARG1:%.*]] = fpext half [[TMP2]] to float
+; NO-FMED3F16-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; NO-FMED3F16-NEXT: [[FABS_EXT_ARG2:%.*]] = fpext half [[TMP3]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FABS_EXT_ARG0]], float [[FABS_EXT_ARG1]], float [[FABS_EXT_ARG2]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_fabs_f32_fpext_f16
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; GFX9-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; GFX9-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[TMP1]], half [[TMP2]], half [[TMP3]])
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %fabs.ext.arg0 = call float @llvm.fabs.f32(float %arg0.ext)
+ %fabs.ext.arg1 = call float @llvm.fabs.f32(float %arg1.ext)
+ %fabs.ext.arg2 = call float @llvm.fabs.f32(float %arg2.ext)
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %fabs.ext.arg0, float %fabs.ext.arg1, float %fabs.ext.arg2)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_fneg(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_fneg
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[FNEG_ARG0:%.*]] = fneg half [[ARG0]]
+; NO-FMED3F16-NEXT: [[FNEG_ARG1:%.*]] = fneg half [[ARG1]]
+; NO-FMED3F16-NEXT: [[FNEG_ARG2:%.*]] = fneg half [[ARG2]]
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[FNEG_ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[FNEG_ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[FNEG_ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_fneg
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[FNEG_ARG0:%.*]] = fneg half [[ARG0]]
+; GFX9-NEXT: [[FNEG_ARG1:%.*]] = fneg half [[ARG1]]
+; GFX9-NEXT: [[FNEG_ARG2:%.*]] = fneg half [[ARG2]]
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[FNEG_ARG0]], half [[FNEG_ARG1]], half [[FNEG_ARG2]])
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %fneg.arg0 = fneg half %arg0
+ %fneg.arg1 = fneg half %arg1
+ %fneg.arg2 = fneg half %arg2
+ %arg0.ext = fpext half %fneg.arg0 to float
+ %arg1.ext = fpext half %fneg.arg1 to float
+ %arg2.ext = fpext half %fneg.arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_fneg_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_fneg_f32_fpext_f16
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[FNEG_EXT_ARG0:%.*]] = fneg float [[ARG0_EXT]]
+; NO-FMED3F16-NEXT: [[FNEG_EXT_ARG1:%.*]] = fneg float [[ARG1_EXT]]
+; NO-FMED3F16-NEXT: [[FNEG_EXT_ARG2:%.*]] = fneg float [[ARG2_EXT]]
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_EXT_ARG0]], float [[FNEG_EXT_ARG1]], float [[FNEG_EXT_ARG2]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_fneg_f32_fpext_f16
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[FNEG_EXT_ARG0:%.*]] = fneg float [[ARG0_EXT]]
+; GFX9-NEXT: [[FNEG_EXT_ARG1:%.*]] = fneg float [[ARG1_EXT]]
+; GFX9-NEXT: [[FNEG_EXT_ARG2:%.*]] = fneg float [[ARG2_EXT]]
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_EXT_ARG0]], float [[FNEG_EXT_ARG1]], float [[FNEG_EXT_ARG2]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %fneg.ext.arg0 = fneg float %arg0.ext
+ %fneg.ext.arg1 = fneg float %arg1.ext
+ %fneg.ext.arg2 = fneg float %arg2.ext
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %fneg.ext.arg0, float %fneg.ext.arg1, float %fneg.ext.arg2)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_fneg_fabs(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_fneg_fabs
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; NO-FMED3F16-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; NO-FMED3F16-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; NO-FMED3F16-NEXT: [[FNEG_FABS_ARG0:%.*]] = fneg half [[FABS_ARG0]]
+; NO-FMED3F16-NEXT: [[FNEG_FABS_ARG1:%.*]] = fneg half [[FABS_ARG1]]
+; NO-FMED3F16-NEXT: [[FNEG_FABS_ARG2:%.*]] = fneg half [[FABS_ARG2]]
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[FNEG_FABS_ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[FNEG_FABS_ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[FNEG_FABS_ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_fneg_fabs
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; GFX9-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; GFX9-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; GFX9-NEXT: [[FNEG_FABS_ARG0:%.*]] = fneg half [[FABS_ARG0]]
+; GFX9-NEXT: [[FNEG_FABS_ARG1:%.*]] = fneg half [[FABS_ARG1]]
+; GFX9-NEXT: [[FNEG_FABS_ARG2:%.*]] = fneg half [[FABS_ARG2]]
+; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[FNEG_FABS_ARG0]], half [[FNEG_FABS_ARG1]], half [[FNEG_FABS_ARG2]])
+; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %fabs.arg0 = call half @llvm.fabs.f16(half %arg0)
+ %fabs.arg1 = call half @llvm.fabs.f16(half %arg1)
+ %fabs.arg2 = call half @llvm.fabs.f16(half %arg2)
+ %fneg.fabs.arg0 = fneg half %fabs.arg0
+ %fneg.fabs.arg1 = fneg half %fabs.arg1
+ %fneg.fabs.arg2 = fneg half %fabs.arg2
+ %arg0.ext = fpext half %fneg.fabs.arg0 to float
+ %arg1.ext = fpext half %fneg.fabs.arg1 to float
+ %arg2.ext = fpext half %fneg.fabs.arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_fneg_fabs_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_fneg_fabs_f32_fpext_f16
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; NO-FMED3F16-NEXT: [[FABS_EXT_ARG0:%.*]] = fpext half [[TMP1]] to float
+; NO-FMED3F16-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; NO-FMED3F16-NEXT: [[FABS_EXT_ARG1:%.*]] = fpext half [[TMP2]] to float
+; NO-FMED3F16-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; NO-FMED3F16-NEXT: [[FABS_EXT_ARG2:%.*]] = fpext half [[TMP3]] to float
+; NO-FMED3F16-NEXT: [[FNEG_FABS_EXT_ARG0:%.*]] = fneg float [[FABS_EXT_ARG0]]
+; NO-FMED3F16-NEXT: [[FNEG_FABS_EXT_ARG1:%.*]] = fneg float [[FABS_EXT_ARG1]]
+; NO-FMED3F16-NEXT: [[FNEG_FABS_EXT_ARG2:%.*]] = fneg float [[FABS_EXT_ARG2]]
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_FABS_EXT_ARG0]], float [[FNEG_FABS_EXT_ARG1]], float [[FNEG_FABS_EXT_ARG2]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_fneg_fabs_f32_fpext_f16
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
+; GFX9-NEXT: [[FABS_EXT_ARG0:%.*]] = fpext half [[TMP1]] to float
+; GFX9-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
+; GFX9-NEXT: [[FABS_EXT_ARG1:%.*]] = fpext half [[TMP2]] to float
+; GFX9-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
+; GFX9-NEXT: [[FABS_EXT_ARG2:%.*]] = fpext half [[TMP3]] to float
+; GFX9-NEXT: [[FNEG_FABS_EXT_ARG0:%.*]] = fneg float [[FABS_EXT_ARG0]]
+; GFX9-NEXT: [[FNEG_FABS_EXT_ARG1:%.*]] = fneg float [[FABS_EXT_ARG1]]
+; GFX9-NEXT: [[FNEG_FABS_EXT_ARG2:%.*]] = fneg float [[FABS_EXT_ARG2]]
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_FABS_EXT_ARG0]], float [[FNEG_FABS_EXT_ARG1]], float [[FNEG_FABS_EXT_ARG2]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %fabs.ext.arg0 = call float @llvm.fabs.f32(float %arg0.ext)
+ %fabs.ext.arg1 = call float @llvm.fabs.f32(float %arg1.ext)
+ %fabs.ext.arg2 = call float @llvm.fabs.f32(float %arg2.ext)
+ %fneg.fabs.ext.arg0 = fneg float %fabs.ext.arg0
+ %fneg.fabs.ext.arg1 = fneg float %fabs.ext.arg1
+ %fneg.fabs.ext.arg2 = fneg float %fabs.ext.arg2
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %fneg.fabs.ext.arg0, float %fneg.fabs.ext.arg1, float %fneg.fabs.ext.arg2)
+ ret float %med3
+}
+
+; --------------------------------------------------------------------------------
+; Negative tests
+; --------------------------------------------------------------------------------
+
+define float @fmed3_f32_fpext_f16_multi_use_0(half %arg0, half %arg1, half %arg2, ptr addrspace(1) %ptr) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_multi_use_0
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: store float [[ARG0_EXT]], ptr addrspace(1) [[PTR]], align 4
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_multi_use_0
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: store float [[ARG0_EXT]], ptr addrspace(1) [[PTR]], align 4
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ store float %arg0.ext, ptr addrspace(1) %ptr
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_multi_use_1(half %arg0, half %arg1, half %arg2, ptr addrspace(1) %ptr) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_multi_use_1
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: store float [[ARG1_EXT]], ptr addrspace(1) [[PTR]], align 4
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_multi_use_1
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: store float [[ARG1_EXT]], ptr addrspace(1) [[PTR]], align 4
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ store float %arg1.ext, ptr addrspace(1) %ptr
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_multi_use_2(half %arg0, half %arg1, half %arg2, ptr addrspace(1) %ptr) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_multi_use_2
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: store float [[ARG2_EXT]], ptr addrspace(1) [[PTR]], align 4
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_multi_use_2
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: store float [[ARG2_EXT]], ptr addrspace(1) [[PTR]], align 4
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ store float %arg2.ext, ptr addrspace(1) %ptr
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_bf16(bfloat %arg0, bfloat %arg1, bfloat %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_bf16
+; NO-FMED3F16-SAME: (bfloat [[ARG0:%.*]], bfloat [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_bf16
+; GFX9-SAME: (bfloat [[ARG0:%.*]], bfloat [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext bfloat %arg0 to float
+ %arg1.ext = fpext bfloat %arg1 to float
+ %arg2.ext = fpext bfloat %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_bf16_0(bfloat %arg0, half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_bf16_0
+; NO-FMED3F16-SAME: (bfloat [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_bf16_0
+; GFX9-SAME: (bfloat [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext bfloat %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_bf16_1(half %arg0, bfloat %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_bf16_1
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], bfloat [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_bf16_1
+; GFX9-SAME: (half [[ARG0:%.*]], bfloat [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext bfloat %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_bf16_2(half %arg0, half %arg1, bfloat %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_bf16_2
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_bf16_2
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext bfloat %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_unrepresentable_k0(half %arg1, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k0
+; NO-FMED3F16-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k0
+; GFX9-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg1.ext = fpext half %arg1 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x41f0000000000000, float %arg1.ext, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_unrepresentable_k1(half %arg0, half %arg2) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k1
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k1
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg2.ext = fpext half %arg2 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float 0x41f0000000000000, float %arg2.ext)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_unrepresentable_k2(half %arg0, half %arg1) #1 {
+; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k2
+; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
+; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float 0x41F0000000000000)
+; NO-FMED3F16-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k2
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float 0x41F0000000000000)
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = fpext half %arg0 to float
+ %arg1.ext = fpext half %arg1 to float
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float 0x41f0000000000000)
+ ret float %med3
+}
+
+define float @fmed3_f32_fpext_f16_strictfp(half %arg0, half %arg1, half %arg2) #2 {
+; UNKNOWN-LABEL: define float @fmed3_f32_fpext_f16_strictfp
+; UNKNOWN-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR2:[0-9]+]] {
+; UNKNOWN-NEXT: [[ARG0_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG0]], metadata !"fpexcept.strict")
+; UNKNOWN-NEXT: [[ARG1_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG1]], metadata !"fpexcept.strict")
+; UNKNOWN-NEXT: [[ARG2_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG2]], metadata !"fpexcept.strict")
+; UNKNOWN-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]]) #[[ATTR2]]
+; UNKNOWN-NEXT: ret float [[MED3]]
+;
+; GFX8-LABEL: define float @fmed3_f32_fpext_f16_strictfp
+; GFX8-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR2:[0-9]+]] {
+; GFX8-NEXT: [[ARG0_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG0]], metadata !"fpexcept.strict")
+; GFX8-NEXT: [[ARG1_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG1]], metadata !"fpexcept.strict")
+; GFX8-NEXT: [[ARG2_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG2]], metadata !"fpexcept.strict")
+; GFX8-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]]) #[[ATTR4:[0-9]+]]
+; GFX8-NEXT: ret float [[MED3]]
+;
+; GFX9-LABEL: define float @fmed3_f32_fpext_f16_strictfp
+; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR2:[0-9]+]] {
+; GFX9-NEXT: [[ARG0_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG0]], metadata !"fpexcept.strict")
+; GFX9-NEXT: [[ARG1_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG1]], metadata !"fpexcept.strict")
+; GFX9-NEXT: [[ARG2_EXT:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half [[ARG2]], metadata !"fpexcept.strict")
+; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]]) #[[ATTR5:[0-9]+]]
+; GFX9-NEXT: ret float [[MED3]]
+;
+ %arg0.ext = call float @llvm.experimental.constrained.fpext.f32.f16(half %arg0, metadata !"fpexcept.strict")
+ %arg1.ext = call float @llvm.experimental.constrained.fpext.f32.f16(half %arg1, metadata !"fpexcept.strict")
+ %arg2.ext = call float @llvm.experimental.constrained.fpext.f32.f16(half %arg2, metadata !"fpexcept.strict")
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext) #2
+ ret float %med3
+}
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn }
+attributes #2 = { strictfp }
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll b/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll
index a31b47b..d931100 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll
@@ -1,613 +1,772 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; Unknown/default target
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=instcombine < %s | FileCheck -check-prefixes=NO-FMED3F16,UNKNOWN %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; Known target, no med3_f16
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=instcombine < %s | FileCheck -check-prefixes=NO-FMED3F16,GFX8 %s
+; Test with "amdgpu-ieee" set to true and false
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=instcombine -mcpu=gfx600 < %s | FileCheck -check-prefixes=CHECK,IEEE1,HAS-IEEE-BIT1 %s
+; RUN: sed 's/\"true\"/\"false\"/g' %s | opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx600 -passes=instcombine | FileCheck -check-prefixes=CHECK,IEEE0 %s
-; Has med3_f16
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=instcombine < %s | FileCheck -check-prefixes=GFX9 %s
+; Test with gfx12 since there is no ieee bit anymore and the attribute is ignored.
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=instcombine < %s | FileCheck -check-prefixes=CHECK,IEEE1,NO-IEEE-BIT %s
+; RUN: sed 's/\"true\"/\"false\"/g' %s | opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=instcombine | FileCheck -check-prefixes=CHECK,IEEE1,NO-IEEE-BIT %s
+; --------------------------------------------------------------------
+; llvm.amdgcn.fmed3
+; --------------------------------------------------------------------
-declare float @llvm.fabs.f32(float) #0
-declare half @llvm.fabs.f16(half) #0
declare float @llvm.amdgcn.fmed3.f32(float, float, float) #0
-define float @fmed3_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1:[0-9]+]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1:[0-9]+]] {
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG1]], half [[ARG2]])
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
+define float @fmed3_f32(float %x, float %y, float %z) #1 {
+; CHECK-LABEL: define float @fmed3_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float [[Y]], float [[Z]])
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float %z)
+ ret float %med3
+}
+
+define float @fmed3_canonicalize_x_c0_c1_f32(float %x) #1 {
+; CHECK-LABEL: define float @fmed3_canonicalize_x_c0_c1_f32(
+; CHECK-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float 0.000000e+00, float 1.000000e+00)
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0.0, float 1.0)
+ ret float %med3
+}
+
+define float @fmed3_canonicalize_c0_x_c1_f32(float %x) #1 {
+; CHECK-LABEL: define float @fmed3_canonicalize_c0_x_c1_f32(
+; CHECK-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float 0.000000e+00, float 1.000000e+00)
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %x, float 1.0)
+ ret float %med3
+}
+
+define float @fmed3_canonicalize_c0_c1_x_f32(float %x) #1 {
+; CHECK-LABEL: define float @fmed3_canonicalize_c0_c1_x_f32(
+; CHECK-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float 0.000000e+00, float 1.000000e+00)
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float %x)
+ ret float %med3
+}
+
+define float @fmed3_canonicalize_x_y_c_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_canonicalize_x_y_c_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float [[Y]], float 1.000000e+00)
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 1.0)
+ ret float %med3
+}
+
+define float @fmed3_canonicalize_x_c_y_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_canonicalize_x_c_y_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float [[Y]], float 1.000000e+00)
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 1.0, float %y)
+ ret float %med3
+}
+
+define float @fmed3_canonicalize_c_x_y_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_canonicalize_c_x_y_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float [[Y]], float 1.000000e+00)
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 1.0, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_undef_x_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_undef_x_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_undef_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float undef, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_fmf_undef_x_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_fmf_undef_x_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call nnan float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_fmf_undef_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call nnan float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call nnan float @llvm.amdgcn.fmed3.f32(float undef, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_undef_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_undef_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_x_undef_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float undef, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_y_undef_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_y_undef_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_x_y_undef_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float undef)
+ ret float %med3
+}
+
+define float @fmed3_qnan0_x_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_qnan0_x_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_qnan0_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000000000000, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_qnan0_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_qnan0_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_x_qnan0_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8000000000000, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_y_qnan0_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_y_qnan0_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_x_y_qnan0_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF8000000000000)
+ ret float %med3
+}
+
+define float @fmed3_qnan1_x_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_qnan1_x_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_qnan1_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000100000000, float %x, float %y)
+ ret float %med3
+}
+
+; This can return any of the qnans.
+define float @fmed3_qnan0_qnan1_qnan2_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_qnan0_qnan1_qnan2_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 0x7FF8030000000000
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000100000000, float 0x7FF8002000000000, float 0x7FF8030000000000)
+ ret float %med3
+}
+
+define float @fmed3_constant_src0_0_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_constant_src0_0_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 5.000000e-01
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.5, float -1.0, float 4.0)
+ ret float %med3
+}
+
+define float @fmed3_constant_src0_1_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_constant_src0_1_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 5.000000e-01
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.5, float 4.0, float -1.0)
+ ret float %med3
+}
+
+define float @fmed3_constant_src1_0_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_constant_src1_0_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 5.000000e-01
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 0.5, float 4.0)
+ ret float %med3
+}
+
+define float @fmed3_constant_src1_1_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_constant_src1_1_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 5.000000e-01
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 4.0, float 0.5, float -1.0)
+ ret float %med3
+}
+
+define float @fmed3_constant_src2_0_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_constant_src2_0_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 5.000000e-01
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 4.0, float 0.5)
+ ret float %med3
}
-define float @fmed3_f32_fpext_f16_flags(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_flags
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call nsz float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_flags
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[MED31:%.*]] = call nsz half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG1]], half [[ARG2]])
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call nsz float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
+define float @fmed3_constant_src2_1_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_constant_src2_1_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float 5.000000e-01
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 4.0, float -1.0, float 0.5)
+ ret float %med3
+}
+
+define float @fmed3_x_qnan0_qnan1_f32(float %x) #1 {
+; IEEE1-LABEL: define float @fmed3_x_qnan0_qnan1_f32(
+; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[X]]
+;
+; IEEE0-LABEL: define float @fmed3_x_qnan0_qnan1_f32(
+; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF8002000000000)
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8001000000000, float 0x7FF8002000000000)
ret float %med3
}
-define float @fmed3_f32_fpext_f16_k0(half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k0
-; NO-FMED3F16-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float [[ARG2_EXT]], float 2.000000e+00)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k0
-; GFX9-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG1]], half [[ARG2]], half 0xH4000)
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 2.0, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_k1(half %arg0, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k1
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG2_EXT]], float 2.000000e+00)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k1
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG2]], half 0xH4000)
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float 2.0, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_k2(half %arg0, half %arg1) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k2
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float 2.000000e+00)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k2
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG0]], half [[ARG1]], half 0xH4000)
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float 2.0)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_k0_k1(half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k0_k1
-; NO-FMED3F16-SAME: (half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG2_EXT]], float 0.000000e+00, float 1.600000e+01)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k0_k1
-; GFX9-SAME: (half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG2]], half 0xH0000, half 0xH4C00)
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 16.0, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_k0_k2(half %arg1) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_k0_k2
-; NO-FMED3F16-SAME: (half [[ARG1:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float 0.000000e+00, float 2.000000e+00)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_k0_k2
-; GFX9-SAME: (half [[ARG1:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[ARG1]], half 0xH0000, half 0xH4000)
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg1.ext = fpext half %arg1 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %arg1.ext, float 2.0)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_fabs(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_fabs
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; NO-FMED3F16-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; NO-FMED3F16-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[FABS_ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[FABS_ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[FABS_ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_fabs
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; GFX9-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; GFX9-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[FABS_ARG0]], half [[FABS_ARG1]], half [[FABS_ARG2]])
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %fabs.arg0 = call half @llvm.fabs.f16(half %arg0)
- %fabs.arg1 = call half @llvm.fabs.f16(half %arg1)
- %fabs.arg2 = call half @llvm.fabs.f16(half %arg2)
- %arg0.ext = fpext half %fabs.arg0 to float
- %arg1.ext = fpext half %fabs.arg1 to float
- %arg2.ext = fpext half %fabs.arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_fabs_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_fabs_f32_fpext_f16
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; NO-FMED3F16-NEXT: [[FABS_EXT_ARG0:%.*]] = fpext half [[TMP1]] to float
-; NO-FMED3F16-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; NO-FMED3F16-NEXT: [[FABS_EXT_ARG1:%.*]] = fpext half [[TMP2]] to float
-; NO-FMED3F16-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; NO-FMED3F16-NEXT: [[FABS_EXT_ARG2:%.*]] = fpext half [[TMP3]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FABS_EXT_ARG0]], float [[FABS_EXT_ARG1]], float [[FABS_EXT_ARG2]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_fabs_f32_fpext_f16
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; GFX9-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; GFX9-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[TMP1]], half [[TMP2]], half [[TMP3]])
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %fabs.ext.arg0 = call float @llvm.fabs.f32(float %arg0.ext)
- %fabs.ext.arg1 = call float @llvm.fabs.f32(float %arg1.ext)
- %fabs.ext.arg2 = call float @llvm.fabs.f32(float %arg2.ext)
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %fabs.ext.arg0, float %fabs.ext.arg1, float %fabs.ext.arg2)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_fneg(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_fneg
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[FNEG_ARG0:%.*]] = fneg half [[ARG0]]
-; NO-FMED3F16-NEXT: [[FNEG_ARG1:%.*]] = fneg half [[ARG1]]
-; NO-FMED3F16-NEXT: [[FNEG_ARG2:%.*]] = fneg half [[ARG2]]
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[FNEG_ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[FNEG_ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[FNEG_ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_fneg
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[FNEG_ARG0:%.*]] = fneg half [[ARG0]]
-; GFX9-NEXT: [[FNEG_ARG1:%.*]] = fneg half [[ARG1]]
-; GFX9-NEXT: [[FNEG_ARG2:%.*]] = fneg half [[ARG2]]
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[FNEG_ARG0]], half [[FNEG_ARG1]], half [[FNEG_ARG2]])
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %fneg.arg0 = fneg half %arg0
- %fneg.arg1 = fneg half %arg1
- %fneg.arg2 = fneg half %arg2
- %arg0.ext = fpext half %fneg.arg0 to float
- %arg1.ext = fpext half %fneg.arg1 to float
- %arg2.ext = fpext half %fneg.arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_fneg_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_fneg_f32_fpext_f16
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[FNEG_EXT_ARG0:%.*]] = fneg float [[ARG0_EXT]]
-; NO-FMED3F16-NEXT: [[FNEG_EXT_ARG1:%.*]] = fneg float [[ARG1_EXT]]
-; NO-FMED3F16-NEXT: [[FNEG_EXT_ARG2:%.*]] = fneg float [[ARG2_EXT]]
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_EXT_ARG0]], float [[FNEG_EXT_ARG1]], float [[FNEG_EXT_ARG2]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_fneg_f32_fpext_f16
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[FNEG_EXT_ARG0:%.*]] = fneg float [[ARG0_EXT]]
-; GFX9-NEXT: [[FNEG_EXT_ARG1:%.*]] = fneg float [[ARG1_EXT]]
-; GFX9-NEXT: [[FNEG_EXT_ARG2:%.*]] = fneg float [[ARG2_EXT]]
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_EXT_ARG0]], float [[FNEG_EXT_ARG1]], float [[FNEG_EXT_ARG2]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %fneg.ext.arg0 = fneg float %arg0.ext
- %fneg.ext.arg1 = fneg float %arg1.ext
- %fneg.ext.arg2 = fneg float %arg2.ext
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %fneg.ext.arg0, float %fneg.ext.arg1, float %fneg.ext.arg2)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_fneg_fabs(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_fneg_fabs
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; NO-FMED3F16-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; NO-FMED3F16-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; NO-FMED3F16-NEXT: [[FNEG_FABS_ARG0:%.*]] = fneg half [[FABS_ARG0]]
-; NO-FMED3F16-NEXT: [[FNEG_FABS_ARG1:%.*]] = fneg half [[FABS_ARG1]]
-; NO-FMED3F16-NEXT: [[FNEG_FABS_ARG2:%.*]] = fneg half [[FABS_ARG2]]
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[FNEG_FABS_ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[FNEG_FABS_ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[FNEG_FABS_ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_fneg_fabs
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[FABS_ARG0:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; GFX9-NEXT: [[FABS_ARG1:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; GFX9-NEXT: [[FABS_ARG2:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; GFX9-NEXT: [[FNEG_FABS_ARG0:%.*]] = fneg half [[FABS_ARG0]]
-; GFX9-NEXT: [[FNEG_FABS_ARG1:%.*]] = fneg half [[FABS_ARG1]]
-; GFX9-NEXT: [[FNEG_FABS_ARG2:%.*]] = fneg half [[FABS_ARG2]]
-; GFX9-NEXT: [[MED31:%.*]] = call half @llvm.amdgcn.fmed3.f16(half [[FNEG_FABS_ARG0]], half [[FNEG_FABS_ARG1]], half [[FNEG_FABS_ARG2]])
-; GFX9-NEXT: [[MED3:%.*]] = fpext half [[MED31]] to float
-; GFX9-NEXT: ret float [[MED3]]
-;
- %fabs.arg0 = call half @llvm.fabs.f16(half %arg0)
- %fabs.arg1 = call half @llvm.fabs.f16(half %arg1)
- %fabs.arg2 = call half @llvm.fabs.f16(half %arg2)
- %fneg.fabs.arg0 = fneg half %fabs.arg0
- %fneg.fabs.arg1 = fneg half %fabs.arg1
- %fneg.fabs.arg2 = fneg half %fabs.arg2
- %arg0.ext = fpext half %fneg.fabs.arg0 to float
- %arg1.ext = fpext half %fneg.fabs.arg1 to float
- %arg2.ext = fpext half %fneg.fabs.arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_fneg_fabs_f32_fpext_f16(half %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_fneg_fabs_f32_fpext_f16
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; NO-FMED3F16-NEXT: [[FABS_EXT_ARG0:%.*]] = fpext half [[TMP1]] to float
-; NO-FMED3F16-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; NO-FMED3F16-NEXT: [[FABS_EXT_ARG1:%.*]] = fpext half [[TMP2]] to float
-; NO-FMED3F16-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; NO-FMED3F16-NEXT: [[FABS_EXT_ARG2:%.*]] = fpext half [[TMP3]] to float
-; NO-FMED3F16-NEXT: [[FNEG_FABS_EXT_ARG0:%.*]] = fneg float [[FABS_EXT_ARG0]]
-; NO-FMED3F16-NEXT: [[FNEG_FABS_EXT_ARG1:%.*]] = fneg float [[FABS_EXT_ARG1]]
-; NO-FMED3F16-NEXT: [[FNEG_FABS_EXT_ARG2:%.*]] = fneg float [[FABS_EXT_ARG2]]
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_FABS_EXT_ARG0]], float [[FNEG_FABS_EXT_ARG1]], float [[FNEG_FABS_EXT_ARG2]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_fneg_fabs_f32_fpext_f16
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[ARG0]])
-; GFX9-NEXT: [[FABS_EXT_ARG0:%.*]] = fpext half [[TMP1]] to float
-; GFX9-NEXT: [[TMP2:%.*]] = call half @llvm.fabs.f16(half [[ARG1]])
-; GFX9-NEXT: [[FABS_EXT_ARG1:%.*]] = fpext half [[TMP2]] to float
-; GFX9-NEXT: [[TMP3:%.*]] = call half @llvm.fabs.f16(half [[ARG2]])
-; GFX9-NEXT: [[FABS_EXT_ARG2:%.*]] = fpext half [[TMP3]] to float
-; GFX9-NEXT: [[FNEG_FABS_EXT_ARG0:%.*]] = fneg float [[FABS_EXT_ARG0]]
-; GFX9-NEXT: [[FNEG_FABS_EXT_ARG1:%.*]] = fneg float [[FABS_EXT_ARG1]]
-; GFX9-NEXT: [[FNEG_FABS_EXT_ARG2:%.*]] = fneg float [[FABS_EXT_ARG2]]
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[FNEG_FABS_EXT_ARG0]], float [[FNEG_FABS_EXT_ARG1]], float [[FNEG_FABS_EXT_ARG2]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %fabs.ext.arg0 = call float @llvm.fabs.f32(float %arg0.ext)
- %fabs.ext.arg1 = call float @llvm.fabs.f32(float %arg1.ext)
- %fabs.ext.arg2 = call float @llvm.fabs.f32(float %arg2.ext)
- %fneg.fabs.ext.arg0 = fneg float %fabs.ext.arg0
- %fneg.fabs.ext.arg1 = fneg float %fabs.ext.arg1
- %fneg.fabs.ext.arg2 = fneg float %fabs.ext.arg2
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %fneg.fabs.ext.arg0, float %fneg.fabs.ext.arg1, float %fneg.fabs.ext.arg2)
- ret float %med3
-}
-
-; --------------------------------------------------------------------------------
-; Negative tests
-; --------------------------------------------------------------------------------
-
-define float @fmed3_f32_fpext_f16_multi_use_0(half %arg0, half %arg1, half %arg2, ptr addrspace(1) %ptr) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_multi_use_0
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: store float [[ARG0_EXT]], ptr addrspace(1) [[PTR]], align 4
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_multi_use_0
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: store float [[ARG0_EXT]], ptr addrspace(1) [[PTR]], align 4
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- store float %arg0.ext, ptr addrspace(1) %ptr
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_multi_use_1(half %arg0, half %arg1, half %arg2, ptr addrspace(1) %ptr) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_multi_use_1
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: store float [[ARG1_EXT]], ptr addrspace(1) [[PTR]], align 4
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_multi_use_1
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: store float [[ARG1_EXT]], ptr addrspace(1) [[PTR]], align 4
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- store float %arg1.ext, ptr addrspace(1) %ptr
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_multi_use_2(half %arg0, half %arg1, half %arg2, ptr addrspace(1) %ptr) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_multi_use_2
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: store float [[ARG2_EXT]], ptr addrspace(1) [[PTR]], align 4
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_multi_use_2
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]], ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: store float [[ARG2_EXT]], ptr addrspace(1) [[PTR]], align 4
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- store float %arg2.ext, ptr addrspace(1) %ptr
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_bf16(bfloat %arg0, bfloat %arg1, bfloat %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_bf16
-; NO-FMED3F16-SAME: (bfloat [[ARG0:%.*]], bfloat [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_bf16
-; GFX9-SAME: (bfloat [[ARG0:%.*]], bfloat [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext bfloat %arg0 to float
- %arg1.ext = fpext bfloat %arg1 to float
- %arg2.ext = fpext bfloat %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_bf16_0(bfloat %arg0, half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_bf16_0
-; NO-FMED3F16-SAME: (bfloat [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_bf16_0
-; GFX9-SAME: (bfloat [[ARG0:%.*]], half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext bfloat [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext bfloat %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_bf16_1(half %arg0, bfloat %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_bf16_1
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], bfloat [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_bf16_1
-; GFX9-SAME: (half [[ARG0:%.*]], bfloat [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext bfloat [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext bfloat %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_bf16_2(half %arg0, half %arg1, bfloat %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_bf16_2
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_bf16_2
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]], bfloat [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext bfloat [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float [[ARG2_EXT]])
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext bfloat %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_unrepresentable_k0(half %arg1, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k0
-; NO-FMED3F16-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k0
-; GFX9-SAME: (half [[ARG1:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG1_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg1.ext = fpext half %arg1 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x41f0000000000000, float %arg1.ext, float %arg2.ext)
- ret float %med3
-}
-
-define float @fmed3_f32_fpext_f16_unrepresentable_k1(half %arg0, half %arg2) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k1
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
-;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k1
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG2:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG2_EXT:%.*]] = fpext half [[ARG2]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG2_EXT]], float 0x41F0000000000000)
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg2.ext = fpext half %arg2 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float 0x41f0000000000000, float %arg2.ext)
- ret float %med3
-}
+define float @fmed3_qnan0_x_qnan1_f32(float %x) #1 {
+; IEEE1-LABEL: define float @fmed3_qnan0_x_qnan1_f32(
+; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[X]]
+;
+; IEEE0-LABEL: define float @fmed3_qnan0_x_qnan1_f32(
+; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF8002000000000)
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float %x, float 0x7FF8002000000000)
+ ret float %med3
+}
+
+define float @fmed3_qnan0_qnan1_x_f32(float %x) #1 {
+; IEEE1-LABEL: define float @fmed3_qnan0_qnan1_x_f32(
+; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[X]]
+;
+; IEEE0-LABEL: define float @fmed3_qnan0_qnan1_x_f32(
+; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF8002000000000)
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float 0x7FF8002000000000, float %x)
+ ret float %med3
+}
-define float @fmed3_f32_fpext_f16_unrepresentable_k2(half %arg0, half %arg1) #1 {
-; NO-FMED3F16-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k2
-; NO-FMED3F16-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
-; NO-FMED3F16-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; NO-FMED3F16-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; NO-FMED3F16-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float 0x41F0000000000000)
-; NO-FMED3F16-NEXT: ret float [[MED3]]
+define float @fmed3_nan_0_1_f32() #1 {
+; CHECK-LABEL: define float @fmed3_nan_0_1_f32(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: ret float 0.000000e+00
;
-; GFX9-LABEL: define float @fmed3_f32_fpext_f16_unrepresentable_k2
-; GFX9-SAME: (half [[ARG0:%.*]], half [[ARG1:%.*]]) #[[ATTR1]] {
-; GFX9-NEXT: [[ARG0_EXT:%.*]] = fpext half [[ARG0]] to float
-; GFX9-NEXT: [[ARG1_EXT:%.*]] = fpext half [[ARG1]] to float
-; GFX9-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[ARG0_EXT]], float [[ARG1_EXT]], float 0x41F0000000000000)
-; GFX9-NEXT: ret float [[MED3]]
-;
- %arg0.ext = fpext half %arg0 to float
- %arg1.ext = fpext half %arg1 to float
- %med3 = call float @llvm.amdgcn.fmed3.f32(float %arg0.ext, float %arg1.ext, float 0x41f0000000000000)
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float 0.0, float 1.0)
ret float %med3
}
+define float @fmed3_0_nan_1_f32() #1 {
+; CHECK-LABEL: define float @fmed3_0_nan_1_f32(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 0x7FF8001000000000, float 1.0)
+ ret float %med
+}
+
+define float @fmed3_0_1_nan_f32() #1 {
+; IEEE1-LABEL: define float @fmed3_0_1_nan_f32(
+; IEEE1-SAME: ) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0.000000e+00
+;
+; IEEE0-LABEL: define float @fmed3_0_1_nan_f32(
+; IEEE0-SAME: ) #[[ATTR1]] {
+; IEEE0-NEXT: ret float 1.000000e+00
+;
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float 0x7FF8001000000000)
+ ret float %med
+}
+
+define float @fmed3_undef_0_1_f32() #1 {
+; CHECK-LABEL: define float @fmed3_undef_0_1_f32(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float undef, float 0.0, float 1.0)
+ ret float %med3
+}
+
+define float @fmed3_0_undef_1_f32() #1 {
+; CHECK-LABEL: define float @fmed3_0_undef_1_f32(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float undef, float 1.0)
+ ret float %med
+}
+
+define float @fmed3_0_1_undef_f32() #1 {
+; IEEE1-LABEL: define float @fmed3_0_1_undef_f32(
+; IEEE1-SAME: ) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0.000000e+00
+;
+; IEEE0-LABEL: define float @fmed3_0_1_undef_f32(
+; IEEE0-SAME: ) #[[ATTR1]] {
+; IEEE0-NEXT: ret float 1.000000e+00
+;
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float undef)
+ ret float %med
+}
+
+define float @fmed3_poison_x_y_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_poison_x_y_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float poison
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float poison, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_poison_y_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_x_poison_y_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float poison
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float poison, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_y_poison_f32(float %x, float %y) #1 {
+; CHECK-LABEL: define float @fmed3_x_y_poison_f32(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: ret float poison
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float poison)
+ ret float %med3
+}
+
+define float @fmed3_snan1_x_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_snan1_x_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[Y]]
+;
+; IEEE0-LABEL: define float @fmed3_snan1_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_snan1_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_snan1_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[Y]]
+;
+; IEEE0-LABEL: define float @fmed3_x_snan1_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF4000000000000, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_y_snan1_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_y_snan1_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0x7FFC000000000000
+;
+; IEEE0-LABEL: define float @fmed3_x_y_snan1_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF4000000000000)
+ ret float %med3
+}
+
+define float @fmed3_snan1_x_snan2_f32(float %x) #1 {
+; IEEE1-LABEL: define float @fmed3_snan1_x_snan2_f32(
+; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0x7FF0000040000000
+;
+; IEEE0-LABEL: define float @fmed3_snan1_x_snan2_f32(
+; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF0000040000000)
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF0000020000000, float %x, float 0x7FF0000040000000)
+ ret float %med3
+}
+
+define float @fmed3_x_snan1_snan2_f32(float %x) #1 {
+; IEEE1-LABEL: define float @fmed3_x_snan1_snan2_f32(
+; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0x7FF0000040000000
+;
+; IEEE0-LABEL: define float @fmed3_x_snan1_snan2_f32(
+; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF0000040000000)
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF0000020000000, float 0x7FF0000040000000)
+ ret float %med3
+}
+
+define float @fmed3_snan1_snan2_snan3_f32(float %x) #1 {
+; IEEE1-LABEL: define float @fmed3_snan1_snan2_snan3_f32(
+; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0x7FF0000040000000
+;
+; IEEE0-LABEL: define float @fmed3_snan1_snan2_snan3_f32(
+; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: ret float 0x7FF8000040000000
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float 0x7FF0000020000000, float 0x7FF0000040000000)
+ ret float %med3
+}
+
+define float @fmed3_snan1_1_2_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_snan1_1_2_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 2.000000e+00
+;
+; IEEE0-LABEL: define float @fmed3_snan1_1_2_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: ret float 1.000000e+00
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float 1.0, float 2.0)
+ ret float %med3
+}
+
+define float @fmed3_snan1_neg1_2_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_snan1_neg1_2_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 2.000000e+00
+;
+; IEEE0-LABEL: define float @fmed3_snan1_neg1_2_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: ret float -1.000000e+00
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 0x7FF4000000000000, float 2.0)
+ ret float %med3
+}
+
+define float @fmed3_neg2_3_snan1_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_neg2_3_snan1_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0x7FFC000000000000
+;
+; IEEE0-LABEL: define float @fmed3_neg2_3_snan1_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: ret float 3.000000e+00
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float -2.0, float 3.0, float 0x7FF4000000000000)
+ ret float %med3
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.fmed3 with default mode implied by shader CC
+; --------------------------------------------------------------------
+
+define amdgpu_ps float @amdgpu_ps_default_fmed3_snan1_x_y_f32(float %x, float %y) {
+; HAS-IEEE-BIT1-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_snan1_x_y_f32(
+; HAS-IEEE-BIT1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
+; HAS-IEEE-BIT1-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; HAS-IEEE-BIT1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_snan1_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+; NO-IEEE-BIT-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_snan1_x_y_f32(
+; NO-IEEE-BIT-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
+; NO-IEEE-BIT-NEXT: ret float [[Y]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float %x, float %y)
+ ret float %med3
+}
+
+define amdgpu_ps float @amdgpu_ps_default_fmed3_x_snan1_y_f32(float %x, float %y) {
+; HAS-IEEE-BIT1-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_x_snan1_y_f32(
+; HAS-IEEE-BIT1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2]] {
+; HAS-IEEE-BIT1-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; HAS-IEEE-BIT1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_x_snan1_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+; NO-IEEE-BIT-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_x_snan1_y_f32(
+; NO-IEEE-BIT-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2]] {
+; NO-IEEE-BIT-NEXT: ret float [[Y]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF4000000000000, float %y)
+ ret float %med3
+}
+
+define amdgpu_ps float @amdgpu_ps_default_fmed3_x_y_snan1_f32(float %x, float %y) {
+; HAS-IEEE-BIT1-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_x_y_snan1_f32(
+; HAS-IEEE-BIT1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2]] {
+; HAS-IEEE-BIT1-NEXT: [[MED3:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; HAS-IEEE-BIT1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_x_y_snan1_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+; NO-IEEE-BIT-LABEL: define amdgpu_ps float @amdgpu_ps_default_fmed3_x_y_snan1_f32(
+; NO-IEEE-BIT-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR2]] {
+; NO-IEEE-BIT-NEXT: ret float 0x7FFC000000000000
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF4000000000000)
+ ret float %med3
+}
+; --------------------------------------------------------------------
+; llvm.amdgcn.fmed3 with default mode shader cc and amdgpu-ieee
+; --------------------------------------------------------------------
+
+define amdgpu_ps float @amdgpu_ps_attr_fmed3_snan1_x_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define amdgpu_ps float @amdgpu_ps_attr_fmed3_snan1_x_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[Y]]
+;
+; IEEE0-LABEL: define amdgpu_ps float @amdgpu_ps_attr_fmed3_snan1_x_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float %x, float %y)
+ ret float %med3
+}
+
+define amdgpu_ps float @amdgpu_ps_attr_fmed3_x_snan1_y_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define amdgpu_ps float @amdgpu_ps_attr_fmed3_x_snan1_y_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float [[Y]]
+;
+; IEEE0-LABEL: define amdgpu_ps float @amdgpu_ps_attr_fmed3_x_snan1_y_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF4000000000000, float %y)
+ ret float %med3
+}
+
+define amdgpu_ps float @amdgpu_ps_attr_fmed3_x_y_snan1_f32(float %x, float %y) #1 {
+; IEEE1-LABEL: define amdgpu_ps float @amdgpu_ps_attr_fmed3_x_y_snan1_f32(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: ret float 0x7FFC000000000000
+;
+; IEEE0-LABEL: define amdgpu_ps float @amdgpu_ps_attr_fmed3_x_y_snan1_f32(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF4000000000000)
+ ret float %med3
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.fmed3 with strictfp calls
+; --------------------------------------------------------------------
+
+define float @fmed3_qnan0_x_y_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_qnan0_x_y_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000000000000, float [[X]], float [[Y]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000000000000, float %x, float %y) strictfp
+ ret float %med3
+}
+
+define float @fmed3_x_qnan0_y_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_x_qnan0_y_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float 0x7FF8000000000000, float [[Y]]) #[[ATTR5]]
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8000000000000, float %y) strictfp
+ ret float %med3
+}
+
+define float @fmed3_x_y_qnan0_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_x_y_qnan0_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float [[Y]], float 0x7FF8000000000000) #[[ATTR5]]
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF8000000000000) strictfp
+ ret float %med3
+}
+
+define float @fmed3_snan1_x_y_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_snan1_x_y_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float [[X]], float [[Y]]) #[[ATTR5]]
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF4000000000000, float %x, float %y) strictfp
+ ret float %med3
+}
+
+define float @fmed3_x_snan1_y_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_x_snan1_y_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float 0x7FF4000000000000, float [[Y]]) #[[ATTR5]]
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF4000000000000, float %y) strictfp
+ ret float %med3
+}
+
+define float @fmed3_x_y_snan1_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_x_y_snan1_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[MED3:%.*]] = call float @llvm.amdgcn.fmed3.f32(float [[X]], float [[Y]], float 0x7FF4000000000000) #[[ATTR5]]
+; CHECK-NEXT: ret float [[MED3]]
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF4000000000000) strictfp
+ ret float %med3
+}
+
+define float @fmed3_poison_x_y_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_poison_x_y_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: ret float poison
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float poison, float %x, float %y) strictfp
+ ret float %med3
+}
+
+define float @fmed3_x_poison_y_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_x_poison_y_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: ret float poison
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float poison, float %y) strictfp
+ ret float %med3
+}
+
+define float @fmed3_x_y_poison_f32_strictfp(float %x, float %y) #2 {
+; CHECK-LABEL: define float @fmed3_x_y_poison_f32_strictfp(
+; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: ret float poison
+;
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float poison) strictfp
+ ret float %med3
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.fmed3 with flags
+; --------------------------------------------------------------------
+
+define float @fmed3_qnan0_x_y_f32_flags(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_qnan0_x_y_f32_flags(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call ninf nsz float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_qnan0_x_y_f32_flags(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call ninf nsz float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call nsz ninf float @llvm.amdgcn.fmed3.f32(float 0x7FF8000000000000, float %x, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_qnan0_y_f32_flags(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_qnan0_y_f32_flags(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call ninf nsz float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_x_qnan0_y_f32_flags(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call ninf nsz float @llvm.minimumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call nsz ninf float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8000000000000, float %y)
+ ret float %med3
+}
+
+define float @fmed3_x_y_qnan0_f32_flags(float %x, float %y) #1 {
+; IEEE1-LABEL: define float @fmed3_x_y_qnan0_f32_flags(
+; IEEE1-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE1-NEXT: [[MED3:%.*]] = call ninf nsz float @llvm.minnum.f32(float [[X]], float [[Y]])
+; IEEE1-NEXT: ret float [[MED3]]
+;
+; IEEE0-LABEL: define float @fmed3_x_y_qnan0_f32_flags(
+; IEEE0-SAME: float [[X:%.*]], float [[Y:%.*]]) #[[ATTR1]] {
+; IEEE0-NEXT: [[MED3:%.*]] = call ninf nsz float @llvm.maximumnum.f32(float [[X]], float [[Y]])
+; IEEE0-NEXT: ret float [[MED3]]
+;
+ %med3 = call nsz ninf float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF8000000000000)
+ ret float %med3
+}
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX8: {{.*}}
-; UNKNOWN: {{.*}}
+attributes #1 = { nounwind "amdgpu-ieee"="true" }
+attributes #2 = { nounwind strictfp "amdgpu-ieee"="true" }
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index feba952..61236df 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -282,8 +282,8 @@ define <2 x i1> @test13_fixed_scalable(i64 %X, ptr %P, <2 x i64> %y) nounwind {
define <vscale x 2 x i1> @test13_scalable_scalable(i64 %X, ptr %P, <vscale x 2 x i64> %y) nounwind {
; CHECK-LABEL: @test13_scalable_scalable(
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[X:%.*]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[A_IDX:%.*]] = shl nsw <vscale x 2 x i64> [[DOTSPLAT]], splat (i64 3)
+; CHECK-NEXT: [[TMP3:%.*]] = shl nsw <vscale x 2 x i64> [[DOTSPLATINSERT]], splat (i64 3)
+; CHECK-NEXT: [[A_IDX:%.*]] = shufflevector <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP2]], i64 0
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
index c6329af16..74d07fe 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
@@ -1789,3 +1789,39 @@ define <4 x i32> @PR46872(<4 x i32> %x) {
ret <4 x i32> %a
}
+define <vscale x 4 x i32> @scalable_splat_binop_constant_rhs(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: @scalable_splat_binop_constant_rhs(
+; CHECK-NEXT: [[R1:%.*]] = add <vscale x 4 x i32> [[R:%.*]], splat (i32 42)
+; CHECK-NEXT: [[R2:%.*]] = shufflevector <vscale x 4 x i32> [[R1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: ret <vscale x 4 x i32> [[R2]]
+;
+
+ %splatx = shufflevector <vscale x 4 x i32> %x, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %r = add <vscale x 4 x i32> %splatx, splat (i32 42)
+ ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x float> @scalable_splat_binop_constant_lhs(<vscale x 4 x float> %x) {
+; CHECK-LABEL: @scalable_splat_binop_constant_lhs(
+; CHECK-NEXT: [[R1:%.*]] = fadd <vscale x 4 x float> [[R:%.*]], splat (float 4.200000e+01)
+; CHECK-NEXT: [[R2:%.*]] = shufflevector <vscale x 4 x float> [[R1]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: ret <vscale x 4 x float> [[R2]]
+;
+
+ %splatx = shufflevector <vscale x 4 x float> %x, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+ %r = fadd <vscale x 4 x float> splat (float 42.0), %splatx
+ ret <vscale x 4 x float> %r
+}
+
+; Negative test - shouldn't pull shuffle out as it udiv isn't safe to speculate.
+define <vscale x 4 x i32> @scalable_splat_binop_constant_ub(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: @scalable_splat_binop_constant_ub(
+; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[R:%.*]] = udiv <vscale x 4 x i32> splat (i32 42), [[SPLATX]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[R]]
+;
+
+ %splatx = shufflevector <vscale x 4 x i32> %x, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %r = udiv <vscale x 4 x i32> splat (i32 42), %splatx
+ ret <vscale x 4 x i32> %r
+}
diff --git a/llvm/test/Transforms/LICM/salvage-hoisted-add.ll b/llvm/test/Transforms/LICM/salvage-hoisted-add.ll
new file mode 100644
index 0000000..1bc5802
--- /dev/null
+++ b/llvm/test/Transforms/LICM/salvage-hoisted-add.ll
@@ -0,0 +1,70 @@
+; RUN: opt -S -passes=licm %s | FileCheck %s
+
+; Check that hoistAdd() in LICM salvages the dbg_value for the hoisted add
+; instruction.
+
+define i32 @hoist_add(ptr %p, ptr %x_p, ptr %length_p) !dbg !5 {
+; CHECK-LABEL: define i32 @hoist_add(
+; CHECK-LABEL: loop:
+; CHECK: #dbg_value(!DIArgList(i32 [[X:%.*]], i32 [[IV:%.*]]), [[META9:![0-9]+]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_stack_value), [[META16:![0-9]+]])
+;
+entry:
+ %x = load i32, ptr %x_p, align 4, !dbg !20, !range !21
+ %length = load i32, ptr %length_p, align 4, !dbg !22, !range !21
+ br label %loop, !dbg !23
+
+loop: ; preds = %backedge, %entry
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ], !dbg !24
+ %arith = add nsw i32 %x, %iv, !dbg !25
+ #dbg_value(i32 %arith, !13, !DIExpression(), !25)
+ %x_check = icmp slt i32 %arith, 4, !dbg !26
+ br i1 %x_check, label %out_of_bounds, label %backedge, !dbg !27
+
+backedge: ; preds = %loop
+ %el.ptr = getelementptr i32, ptr %p, i32 %iv, !dbg !28
+ store i32 1, ptr %el.ptr, align 4, !dbg !29
+ %iv.next = add nuw nsw i32 %iv, 4, !dbg !30
+ %loop_cond = icmp slt i32 %iv.next, %length, !dbg !31
+ br i1 %loop_cond, label %loop, label %exit, !dbg !32
+
+exit: ; preds = %backedge
+ ret i32 %iv.next, !dbg !33
+
+out_of_bounds: ; preds = %loop
+ ret i32 -1, !dbg !34
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "salvage-hoisted-add", directory: "/")
+!2 = !{i32 14}
+!3 = !{i32 8}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "hoist_add", linkageName: "hoist_add", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !{!13}
+!10 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
+!13 = !DILocalVariable(name: "4", scope: !5, file: !1, line: 5, type: !10)
+!20 = !DILocation(line: 1, column: 1, scope: !5)
+!21 = !{i32 0, i32 -2147483648}
+!22 = !DILocation(line: 2, column: 1, scope: !5)
+!23 = !DILocation(line: 3, column: 1, scope: !5)
+!24 = !DILocation(line: 4, column: 1, scope: !5)
+!25 = !DILocation(line: 5, column: 1, scope: !5)
+!26 = !DILocation(line: 6, column: 1, scope: !5)
+!27 = !DILocation(line: 7, column: 1, scope: !5)
+!28 = !DILocation(line: 8, column: 1, scope: !5)
+!29 = !DILocation(line: 9, column: 1, scope: !5)
+!30 = !DILocation(line: 10, column: 1, scope: !5)
+!31 = !DILocation(line: 11, column: 1, scope: !5)
+!32 = !DILocation(line: 12, column: 1, scope: !5)
+!33 = !DILocation(line: 13, column: 1, scope: !5)
+!34 = !DILocation(line: 14, column: 1, scope: !5)
+;.
+; CHECK: [[META9]] = !DILocalVariable(name: "4",
+; CHECK: [[META16]] = !DILocation(line: 5, column: 1,
+;.
diff --git a/llvm/test/Transforms/LICM/salvage-hoisted-binop.ll b/llvm/test/Transforms/LICM/salvage-hoisted-binop.ll
new file mode 100644
index 0000000..3a40fa1
--- /dev/null
+++ b/llvm/test/Transforms/LICM/salvage-hoisted-binop.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -passes=licm %s | FileCheck %s
+
+; Check that hoistBOAssociation() in LICM salvages the dbg_value for the
+; hoisted binary operation.
+
+define void @hoist_binop(i64 %c1, i64 %c2) !dbg !5 {
+; CHECK-LABEL: define void @hoist_binop(
+; CHECK-LABEL: loop:
+; CHECK: #dbg_value(!DIArgList(i64 [[INDEX:%.*]], i64 [[C1:%.*]]), [[META9:![0-9]+]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_stack_value), [[META13:![0-9]+]])
+;
+entry:
+ br label %loop, !dbg !13
+
+loop: ; preds = %loop, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %loop ], !dbg !14
+ %step.add = add i64 %index, %c1, !dbg !15
+ #dbg_value(i64 %step.add, !11, !DIExpression(), !15)
+ %index.next = add i64 %step.add, %c2, !dbg !16
+ br label %loop, !dbg !17
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "salvage-hoist-binop.ll", directory: "/")
+!2 = !{i32 5}
+!3 = !{i32 3}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "hoist_binop", linkageName: "hoist_binop", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !{!11}
+!10 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned)
+!11 = !DILocalVariable(name: "2", scope: !5, file: !1, line: 3, type: !10)
+!13 = !DILocation(line: 1, column: 1, scope: !5)
+!14 = !DILocation(line: 2, column: 1, scope: !5)
+!15 = !DILocation(line: 3, column: 1, scope: !5)
+!16 = !DILocation(line: 4, column: 1, scope: !5)
+!17 = !DILocation(line: 5, column: 1, scope: !5)
+;.
+; CHECK: [[META9]] = !DILocalVariable(name: "2",
+; CHECK: [[META13]] = !DILocation(line: 3, column: 1,
+;.
diff --git a/llvm/test/Transforms/LICM/salvage-hoisted-gep.ll b/llvm/test/Transforms/LICM/salvage-hoisted-gep.ll
new file mode 100644
index 0000000..316f81d
--- /dev/null
+++ b/llvm/test/Transforms/LICM/salvage-hoisted-gep.ll
@@ -0,0 +1,58 @@
+; RUN: opt -S -passes=licm %s | FileCheck %s
+
+; Check that hoistGEP() in LICM salvages the dbg_value for the hoisted
+; getelementptr instruction.
+
+define void @hoist_gep(ptr %ptr, i1 %c, i32 %arg) !dbg !5 {
+; CHECK-LABEL: define void @hoist_gep(
+; CHECK-LABEL: loop:
+; CHECK: #dbg_value(!DIArgList(ptr [[PTR:%.*]], i64 [[VAL_EXT:%.*]]), [[META9:![0-9]+]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_stack_value), [[META15:![0-9]+]])
+;
+entry:
+ %arg.ext = zext i32 %arg to i64, !dbg !16
+ br label %loop, !dbg !17
+
+loop: ; preds = %loop, %entry
+ %val = call i32 @get.i32(), !dbg !18
+ %val.ext = zext i32 %val to i64, !dbg !19
+ %ptr2 = getelementptr inbounds i8, ptr %ptr, i64 %val.ext, !dbg !20
+ #dbg_value(ptr %ptr2, !14, !DIExpression(), !20)
+ %ptr3 = getelementptr i8, ptr %ptr2, i64 %arg.ext, !dbg !21
+ call void @use(ptr %ptr3), !dbg !22
+ br i1 %c, label %loop, label %exit, !dbg !23
+
+exit: ; preds = %loop
+ ret void, !dbg !24
+}
+
+declare i32 @get.i32()
+declare void @use(ptr)
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "salvage-hoisted-gep.ll", directory: "/")
+!2 = !{i32 9}
+!3 = !{i32 5}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "hoist_gep", linkageName: "hoist_gep", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !{!14}
+!10 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned)
+!14 = !DILocalVariable(name: "4", scope: !5, file: !1, line: 5, type: !10)
+!16 = !DILocation(line: 1, column: 1, scope: !5)
+!17 = !DILocation(line: 2, column: 1, scope: !5)
+!18 = !DILocation(line: 3, column: 1, scope: !5)
+!19 = !DILocation(line: 4, column: 1, scope: !5)
+!20 = !DILocation(line: 5, column: 1, scope: !5)
+!21 = !DILocation(line: 6, column: 1, scope: !5)
+!22 = !DILocation(line: 7, column: 1, scope: !5)
+!23 = !DILocation(line: 8, column: 1, scope: !5)
+!24 = !DILocation(line: 9, column: 1, scope: !5)
+;.
+; CHECK: [[META9]] = !DILocalVariable(name: "4",
+; CHECK: [[META15]] = !DILocation(line: 5,
+;.
diff --git a/llvm/test/Transforms/LICM/salvage-hoisted-sub.ll b/llvm/test/Transforms/LICM/salvage-hoisted-sub.ll
new file mode 100644
index 0000000..60f9f2b
--- /dev/null
+++ b/llvm/test/Transforms/LICM/salvage-hoisted-sub.ll
@@ -0,0 +1,70 @@
+; RUN: opt -S -passes=licm %s | FileCheck %s
+
+; Check that hoistSub() in LICM salvages the dbg_value for the hoisted sub
+; instruction.
+
+define i32 @hoist_sub(ptr %p, ptr %x_p, ptr %length_p) !dbg !5 {
+; CHECK-LABEL: define i32 @hoist_sub(
+; CHECK-LABEL: loop:
+; CHECK: #dbg_value(!DIArgList(i32 [[X:%.*]], i32 [[IV:%.*]]), [[META9:![0-9]+]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_minus, DW_OP_stack_value), [[META16:![0-9]+]])
+;
+entry:
+ %x = load i32, ptr %x_p, align 4, !dbg !20, !range !21
+ %length = load i32, ptr %length_p, align 4, !dbg !22, !range !21
+ br label %loop, !dbg !23
+
+loop: ; preds = %backedge, %entry
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ], !dbg !24
+ %arith = sub nsw i32 %x, %iv, !dbg !25
+ #dbg_value(i32 %arith, !13, !DIExpression(), !25)
+ %x_check = icmp slt i32 %arith, 4, !dbg !26
+ br i1 %x_check, label %out_of_bounds, label %backedge, !dbg !27
+
+backedge: ; preds = %loop
+ %el.ptr = getelementptr i32, ptr %p, i32 %iv, !dbg !28
+ store i32 1, ptr %el.ptr, align 4, !dbg !29
+ %iv.next = add nuw nsw i32 %iv, 4, !dbg !30
+ %loop_cond = icmp slt i32 %iv.next, %length, !dbg !31
+ br i1 %loop_cond, label %loop, label %exit, !dbg !32
+
+exit: ; preds = %backedge
+ ret i32 %iv.next, !dbg !33
+
+out_of_bounds: ; preds = %loop
+ ret i32 -1, !dbg !34
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "salvage-hoisted-sub.ll", directory: "/")
+!2 = !{i32 14}
+!3 = !{i32 8}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "hoist_sub", linkageName: "hoist_sub", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !{!13}
+!10 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
+!13 = !DILocalVariable(name: "4", scope: !5, file: !1, line: 5, type: !10)
+!20 = !DILocation(line: 1, column: 1, scope: !5)
+!21 = !{i32 0, i32 -2147483648}
+!22 = !DILocation(line: 2, column: 1, scope: !5)
+!23 = !DILocation(line: 3, column: 1, scope: !5)
+!24 = !DILocation(line: 4, column: 1, scope: !5)
+!25 = !DILocation(line: 5, column: 1, scope: !5)
+!26 = !DILocation(line: 6, column: 1, scope: !5)
+!27 = !DILocation(line: 7, column: 1, scope: !5)
+!28 = !DILocation(line: 8, column: 1, scope: !5)
+!29 = !DILocation(line: 9, column: 1, scope: !5)
+!30 = !DILocation(line: 10, column: 1, scope: !5)
+!31 = !DILocation(line: 11, column: 1, scope: !5)
+!32 = !DILocation(line: 12, column: 1, scope: !5)
+!33 = !DILocation(line: 13, column: 1, scope: !5)
+!34 = !DILocation(line: 14, column: 1, scope: !5)
+;.
+; CHECK: [[META9]] = !DILocalVariable(name: "4",
+; CHECK: [[META16]] = !DILocation(line: 5, column: 1,
+;.
diff --git a/llvm/test/Transforms/LoopDistribute/salvage-dbg-values-in-distributed-loops.ll b/llvm/test/Transforms/LoopDistribute/salvage-dbg-values-in-distributed-loops.ll
new file mode 100644
index 0000000..14ae07b
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/salvage-dbg-values-in-distributed-loops.ll
@@ -0,0 +1,77 @@
+; RUN: opt -passes=loop-distribute -enable-loop-distribute -S < %s | FileCheck %s
+
+; Check that removeUnusedInsts() salvages `dbg_value`s which use dead
+; instructions in the distributed loops.
+
+define void @f(ptr noalias %a, ptr noalias %c, ptr noalias %d) !dbg !5 {
+; CHECK-LABEL: define void @f(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[C:%.*]], ptr noalias [[D:%.*]])
+
+entry:
+ br label %for.body, !dbg !21
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ], !dbg !22
+ %add = add nuw nsw i64 %ind, 1, !dbg !23
+
+; CHECK-LABEL: for.body.ldist1:
+; CHECK: #dbg_value(!DIArgList(ptr [[D]], i64 [[IND_LDIST1:%.*]]), [[META16:![0-9]+]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_constu, 4, DW_OP_mul, DW_OP_plus, DW_OP_stack_value), [[META28:![0-9]+]])
+;
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind, !dbg !24
+ #dbg_value(ptr %arrayidxA, !12, !DIExpression(), !24)
+ %loadA = load i32, ptr %arrayidxA, align 4, !dbg !25
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add, !dbg !26
+ store i32 %loadA, ptr %arrayidxA_plus_4, align 4, !dbg !27
+
+; CHECK-LABEL: for.body:
+; CHECK: #dbg_value(!DIArgList(ptr [[A]], i64 [[IND:%.*]]), [[META12:![0-9]+]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_constu, 4, DW_OP_mul, DW_OP_plus, DW_OP_stack_value), [[DBG24:![0-9]+]])
+;
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind, !dbg !28
+ #dbg_value(ptr %arrayidxD, !16, !DIExpression(), !28)
+ %loadD = load i32, ptr %arrayidxD, align 4, !dbg !29
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind, !dbg !30
+ store i32 %loadD, ptr %arrayidxC, align 4, !dbg !31
+
+ %exitcond = icmp eq i64 %add, 20, !dbg !32
+ br i1 %exitcond, label %for.end, label %for.body, !dbg !33
+
+for.end: ; preds = %for.body
+ ret void, !dbg !34
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "temp.ll", directory: "/")
+!2 = !{i32 14}
+!3 = !{i32 9}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "f", linkageName: "f", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !{!12, !16}
+!10 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned)
+!12 = !DILocalVariable(name: "3", scope: !5, file: !1, line: 4, type: !10)
+!16 = !DILocalVariable(name: "6", scope: !5, file: !1, line: 8, type: !10)
+!21 = !DILocation(line: 1, column: 1, scope: !5)
+!22 = !DILocation(line: 2, column: 1, scope: !5)
+!23 = !DILocation(line: 3, column: 1, scope: !5)
+!24 = !DILocation(line: 4, column: 1, scope: !5)
+!25 = !DILocation(line: 5, column: 1, scope: !5)
+!26 = !DILocation(line: 6, column: 1, scope: !5)
+!27 = !DILocation(line: 7, column: 1, scope: !5)
+!28 = !DILocation(line: 8, column: 1, scope: !5)
+!29 = !DILocation(line: 9, column: 1, scope: !5)
+!30 = !DILocation(line: 10, column: 1, scope: !5)
+!31 = !DILocation(line: 11, column: 1, scope: !5)
+!32 = !DILocation(line: 12, column: 1, scope: !5)
+!33 = !DILocation(line: 13, column: 1, scope: !5)
+!34 = !DILocation(line: 14, column: 1, scope: !5)
+;.
+; CHECK: [[META12]] = !DILocalVariable(name: "3"
+; CHECK: [[META16]] = !DILocalVariable(name: "6"
+; CHECK: [[DBG24]] = !DILocation(line: 4, column: 1
+; CHECK: [[META28]] = !DILocation(line: 8, column: 1
+;.
diff --git a/llvm/test/Transforms/LoopUnroll/peel-last-iteration.ll b/llvm/test/Transforms/LoopUnroll/peel-last-iteration.ll
new file mode 100644
index 0000000..e04786e
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/peel-last-iteration.ll
@@ -0,0 +1,431 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-unroll -S %s | FileCheck %s
+
+define i64 @peel_single_block_loop_iv_step_1() {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_1() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], 63
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], 64
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, 63
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp ne i64 %iv.next, 64
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+; The predicate %cmp doesn't become known in all iterations after peeling.
+define i64 @single_block_loop_iv_step_1_predicate_not_known_true_false_after_peeling() {
+; CHECK-LABEL: define i64 @single_block_loop_iv_step_1_predicate_not_known_true_false_after_peeling() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[UREM:%.*]] = urem i64 [[IV]], 2
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[UREM]], 1
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], 64
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %urem = urem i64 %iv, 2
+ %cmp = icmp eq i64 %urem, 1
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp ne i64 %iv.next, 64
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+
+
+define i64 @peel_single_block_loop_iv_step_1_eq_pred() {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_1_eq_pred() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], 63
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 64
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, 63
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 64
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @peel_single_block_loop_iv_step_1_slt_pred() {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_1_slt_pred() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], 63
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 64
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, 63
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp slt i64 %iv.next, 64
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @peel_single_block_loop_iv_step_1_nested_loop() {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_1_nested_loop() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
+; CHECK: [[OUTER_HEADER]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV_NEXT_LCSSA:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[IV_NEXT_PEEL:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT_PEEL:%.*]] = icmp eq i64 [[IV_NEXT_LCSSA]], 63
+; CHECK-NEXT: [[COND_PEEL:%.*]] = select i1 [[CMP18_NOT_PEEL]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND_PEEL]])
+; CHECK-NEXT: [[IV_NEXT_PEEL]] = add i64 [[IV_NEXT_LCSSA]], 1
+; CHECK-NEXT: [[EC_PEEL:%.*]] = icmp ne i64 [[IV_NEXT_PEEL]], 64
+; CHECK-NEXT: br i1 [[EC_PEEL]], label %[[LOOP]], label %[[OUTER_LATCH:.*]]
+; CHECK: [[OUTER_LATCH]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV_NEXT_LCSSA]], %[[LOOP]] ]
+; CHECK-NEXT: call void @foo(i32 1)
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %outer.header
+
+outer.header:
+ %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ]
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %outer.header ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, 63
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp ne i64 %iv.next, 64
+ br i1 %ec, label %loop, label %outer.latch
+
+outer.latch:
+ call void @foo(i32 1)
+ %outer.iv.next = add i64 %outer.iv, 1
+ %outer.ec = icmp ne i64 %outer.iv.next, 100
+ br i1 %outer.ec, label %exit, label %outer.header
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @peel_multi_block_loop_iv_step_1() {
+; CHECK-LABEL: define i64 @peel_multi_block_loop_iv_step_1() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV_NEXT_LCSSA:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_PEEL:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT: [[CMP18_NOT_PEEL:%.*]] = icmp eq i64 [[IV_NEXT_LCSSA]], 63
+; CHECK-NEXT: [[COND_PEEL:%.*]] = select i1 [[CMP18_NOT_PEEL]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND_PEEL]])
+; CHECK-NEXT: [[C_PEEL:%.*]] = call i1 @cond()
+; CHECK-NEXT: br i1 [[C_PEEL]], label %[[THEN:.*]], label %[[LATCH]]
+; CHECK: [[THEN]]:
+; CHECK-NEXT: call void @foo(i32 [[COND_PEEL]])
+; CHECK-NEXT: br label %[[LATCH]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[IV_NEXT_PEEL]] = add i64 [[IV_NEXT_LCSSA]], 1
+; CHECK-NEXT: [[EC_PEEL:%.*]] = icmp ne i64 [[IV_NEXT_PEEL]], 64
+; CHECK-NEXT: br i1 [[EC_PEEL]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV_NEXT_LCSSA]], %[[LATCH]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %cmp = icmp eq i64 %iv, 63
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %c = call i1 @cond()
+ br i1 %c, label %then, label %latch
+
+then:
+ call void @foo(i32 %cond)
+ br label %latch
+
+latch:
+ %iv.next = add i64 %iv, 1
+ %ec = icmp ne i64 %iv.next, 64
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @peel_multi_exit_loop_iv_step_1() {
+; CHECK-LABEL: define i64 @peel_multi_exit_loop_iv_step_1() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], 63
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
+; CHECK-NEXT: br i1 [[C]], label %[[EXIT:.*]], label %[[LATCH]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], 64
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LATCH]] ], [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %cmp = icmp eq i64 %iv, 63
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %c = call i1 @cond()
+ br i1 %c, label %exit, label %latch
+
+latch:
+ %iv.next = add i64 %iv, 1
+ %ec = icmp ne i64 %iv.next, 64
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+
+define i64 @peel_single_block_loop_iv_step_1_may_execute_only_once(i64 %n) {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_1_may_execute_only_once(
+; CHECK-SAME: i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[N_NOT_0:%.*]] = icmp ne i64 [[N]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[N_NOT_0]])
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i64 [[N]], 1
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], [[N]]
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ %n.not.0 = icmp ne i64 %n, 0
+ call void @llvm.assume(i1 %n.not.0)
+ %sub = add nsw i64 %n, 1
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, %n
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp ne i64 %iv.next, %n
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @peel_single_block_loop_iv_step_neg_1() {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_neg_1() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 64, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], 1
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], 0
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 64, %entry ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, 1
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, -1
+ %ec = icmp ne i64 %iv.next, 0
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @peel_single_block_loop_iv_step_2() {
+; CHECK-LABEL: define i64 @peel_single_block_loop_iv_step_2() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CMP18_NOT:%.*]] = icmp eq i64 [[IV]], 62
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP18_NOT]], i32 10, i32 20
+; CHECK-NEXT: call void @foo(i32 [[COND]])
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 2
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], 64
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i64 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %cmp = icmp eq i64 %iv, 62
+ %cond = select i1 %cmp, i32 10, i32 20
+ call void @foo(i32 %cond)
+ %iv.next = add i64 %iv, 2
+ %ec = icmp ne i64 %iv.next, 64
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i32 @peel_loop_with_branch_and_phi_uses(ptr %x, i1 %c) {
+; CHECK-LABEL: define i32 @peel_loop_with_branch_and_phi_uses(
+; CHECK-SAME: ptr [[X:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[LOOP_HEADER_PREHEADER:.*]], label %[[EXIT:.*]]
+; CHECK: [[LOOP_HEADER_PREHEADER]]:
+; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
+; CHECK: [[LOOP_HEADER]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[LOOP_HEADER_PREHEADER]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[ADD:%.*]], %[[LOOP_LATCH]] ], [ 0, %[[LOOP_HEADER_PREHEADER]] ]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[IV]], 99
+; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[LOOP_LATCH]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: tail call void @foo(i32 10)
+; CHECK-NEXT: br label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[GEP_X:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i32 [[IV]]
+; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_X]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[L]], [[RED]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp ne i32 [[IV_NEXT]], 100
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[LOOPEXIT:.*]]
+; CHECK: [[LOOPEXIT]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD_LCSSA]], %[[LOOPEXIT]] ]
+; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+;
+entry:
+ br i1 %c, label %loop.header, label %exit
+
+loop.header:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %red = phi i32 [ 0, %entry ], [ %add, %loop.latch ]
+ %cmp1 = icmp eq i32 %iv, 99
+ br i1 %cmp1, label %if.then, label %loop.latch
+
+if.then:
+ tail call void @foo(i32 10)
+ br label %loop.latch
+
+loop.latch:
+ %gep.x = getelementptr inbounds nuw i32, ptr %x, i32 %iv
+ %l = load i32, ptr %gep.x, align 4
+ %add = add nsw i32 %l, %red
+ %iv.next = add nuw nsw i32 %iv, 1
+ %ec = icmp ne i32 %iv.next, 100
+ br i1 %ec, label %loop.header, label %loopexit
+
+loopexit:
+ %add.lcssa = phi i32 [ %add, %loop.latch ]
+ br label %exit
+
+exit:
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa, %loopexit ]
+ ret i32 %sum.0.lcssa
+}
+
+declare void @foo(i32)
+declare i1 @cond()
diff --git a/llvm/test/Transforms/LoopVersioningLICM/load-from-unknown-address.ll b/llvm/test/Transforms/LoopVersioningLICM/load-from-unknown-address.ll
new file mode 100644
index 0000000..e9b2954
--- /dev/null
+++ b/llvm/test/Transforms/LoopVersioningLICM/load-from-unknown-address.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt < %s -S -passes='function(loop-versioning-licm,loop-mssa(licm))' | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+
+; In these tests we have a loop where we can calculate the bounds of some memory
+; accesses but not others.
+
+; Load from a gep whose bounds can't be calculated as the offset is loaded from memory
+; FIXME: Not knowing the bounds of the gep shouldn't stop us from hoisting the load of rval
+define void @gep_loaded_offset(ptr %p, ptr %q, ptr %r, i32 %n) {
+; CHECK-LABEL: define void @gep_loaded_offset(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[R:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_ADDR:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[P_ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], %[[WHILE_BODY]] ], [ [[P]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_ADDR]], -1
+; CHECK-NEXT: [[RVAL:%.*]] = load i64, ptr [[R]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[Q]], i64 [[RVAL]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds nuw i8, ptr [[P_ADDR]], i64 4
+; CHECK-NEXT: store i32 [[VAL]], ptr [[P_ADDR]], align 4
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n.addr = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %p.addr = phi ptr [ %incdec.ptr, %while.body ], [ %p, %entry ]
+ %dec = add nsw i32 %n.addr, -1
+ %rval = load i64, ptr %r, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %q, i64 %rval
+ %val = load i32, ptr %arrayidx, align 4
+ %incdec.ptr = getelementptr inbounds nuw i8, ptr %p.addr, i64 4
+ store i32 %val, ptr %p.addr, align 4
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+; As above but with a store to the loaded address. This should prevent the loop
+; from being versioned, as we wouldn't be able to do any code motion.
+define void @gep_loaded_offset_with_store(ptr %p, ptr %q, ptr %r, i32 %n) {
+; CHECK-LABEL: define void @gep_loaded_offset_with_store(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[R:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_ADDR:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[P_ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], %[[WHILE_BODY]] ], [ [[P]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_ADDR]], -1
+; CHECK-NEXT: [[RVAL:%.*]] = load i64, ptr [[R]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[Q]], i64 [[RVAL]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds nuw i8, ptr [[P_ADDR]], i64 4
+; CHECK-NEXT: store i32 [[VAL]], ptr [[P_ADDR]], align 4
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n.addr = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %p.addr = phi ptr [ %incdec.ptr, %while.body ], [ %p, %entry ]
+ %dec = add nsw i32 %n.addr, -1
+ %rval = load i64, ptr %r, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %q, i64 %rval
+ %val = load i32, ptr %arrayidx, align 4
+ store i32 0, ptr %arrayidx, align 4
+ %incdec.ptr = getelementptr inbounds nuw i8, ptr %p.addr, i64 4
+ store i32 %val, ptr %p.addr, align 4
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+; Load from a gep whose bounds can't be calculated as the pointer is loaded from memory
+; FIXME: Not knowing the bounds of the gep shouldn't stop us from hoisting the load of rval
+define void @gep_loaded_base(ptr %p, ptr %q, ptr %r, i32 %n) {
+; CHECK-LABEL: define void @gep_loaded_base(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[R:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_ADDR:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[P_ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], %[[WHILE_BODY]] ], [ [[P]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_ADDR]], -1
+; CHECK-NEXT: [[RVAL:%.*]] = load ptr, ptr [[R]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[RVAL]], i64 0
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds nuw i8, ptr [[P_ADDR]], i64 4
+; CHECK-NEXT: store i32 [[VAL]], ptr [[P_ADDR]], align 4
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n.addr = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %p.addr = phi ptr [ %incdec.ptr, %while.body ], [ %p, %entry ]
+ %dec = add nsw i32 %n.addr, -1
+ %rval = load ptr, ptr %r, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %rval, i64 0
+ %val = load i32, ptr %arrayidx, align 4
+ %incdec.ptr = getelementptr inbounds nuw i8, ptr %p.addr, i64 4
+ store i32 %val, ptr %p.addr, align 4
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+; Load from a gep with an offset that scalar evolution can't describe
+; FIXME: Not knowing the bounds of the gep shouldn't stop us from hoisting the load of qval
+define void @gep_strange_offset(ptr %p, ptr %q, ptr %r, i32 %n) {
+; CHECK-LABEL: define void @gep_strange_offset(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[R:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_ADDR:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[P_ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], %[[WHILE_BODY]] ], [ [[P]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_ADDR]], -1
+; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[Q]], align 4
+; CHECK-NEXT: [[REM:%.*]] = srem i32 [[DEC]], 2
+; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[REM]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[VAL]], [[QVAL]]
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds nuw i8, ptr [[P_ADDR]], i64 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[P_ADDR]], align 4
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n.addr = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %p.addr = phi ptr [ %incdec.ptr, %while.body ], [ %p, %entry ]
+ %dec = add nsw i32 %n.addr, -1
+ %qval = load i32, ptr %q, align 4
+ %rem = srem i32 %dec, 2
+ %idxprom = sext i32 %rem to i64
+ %arrayidx = getelementptr inbounds i32, ptr %r, i64 %idxprom
+ %val = load i32, ptr %arrayidx, align 4
+ %add = add nsw i32 %val, %qval
+ %incdec.ptr = getelementptr inbounds nuw i8, ptr %p.addr, i64 4
+ store i32 %add, ptr %p.addr, align 4
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+; A memcpy-like loop where the source address is loaded from a pointer
+; FIXME: We should be able to hoist the load of the source address pointer
+define void @memcpy_load_src(ptr %dst, ptr %src, i32 %n) {
+; CHECK-LABEL: define void @memcpy_load_src(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_VAL:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DST_VAL:%.*]] = phi ptr [ [[DST_VAL_NEXT:%.*]], %[[WHILE_BODY]] ], [ [[DST]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_VAL]], -1
+; CHECK-NEXT: [[SRC_VAL:%.*]] = load ptr, ptr [[SRC]], align 8
+; CHECK-NEXT: [[SRC_VAL_NEXT:%.*]] = getelementptr inbounds nuw i8, ptr [[SRC_VAL]], i64 1
+; CHECK-NEXT: [[DST_VAL_NEXT]] = getelementptr inbounds nuw i8, ptr [[DST_VAL]], i64 1
+; CHECK-NEXT: store ptr [[SRC_VAL_NEXT]], ptr [[SRC]], align 8
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[SRC_VAL]], align 1
+; CHECK-NEXT: store i8 [[VAL]], ptr [[DST_VAL]], align 1
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n_val = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %dst_val = phi ptr [ %dst_val.next, %while.body ], [ %dst, %entry ]
+ %dec = add nsw i32 %n_val, -1
+ %src_val = load ptr, ptr %src, align 8
+ %src_val.next = getelementptr inbounds nuw i8, ptr %src_val, i64 1
+ %dst_val.next = getelementptr inbounds nuw i8, ptr %dst_val, i64 1
+ store ptr %src_val.next, ptr %src, align 8
+ %val = load i8, ptr %src_val, align 1
+ store i8 %val, ptr %dst_val, align 1
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+; A memcpy-like loop where the destination address is loaded from a pointer
+; FIXME: We could hoist the load of the destination address, but doing the
+; bounds check of the store through that pointer itself requires using the
+; hoisted load.
+define void @memcpy_load_dst(ptr %dst, ptr %src, i32 %n) {
+; CHECK-LABEL: define void @memcpy_load_dst(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_VAL:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[SRC_VAL:%.*]] = phi ptr [ [[SRC_VAL_NEXT:%.*]], %[[WHILE_BODY]] ], [ [[SRC]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_VAL]], -1
+; CHECK-NEXT: [[DST_VAL:%.*]] = load ptr, ptr [[DST]], align 8
+; CHECK-NEXT: [[SRC_VAL_NEXT]] = getelementptr inbounds nuw i8, ptr [[SRC_VAL]], i64 1
+; CHECK-NEXT: [[DST_VAL_NEXT:%.*]] = getelementptr inbounds nuw i8, ptr [[DST_VAL]], i64 1
+; CHECK-NEXT: store ptr [[DST_VAL_NEXT]], ptr [[DST]], align 8
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[SRC_VAL]], align 1
+; CHECK-NEXT: store i8 [[VAL]], ptr [[DST_VAL]], align 1
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n_val = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %src_val = phi ptr [ %src_val.next, %while.body ], [ %src, %entry ]
+ %dec = add nsw i32 %n_val, -1
+ %dst_val = load ptr, ptr %dst, align 8
+ %src_val.next = getelementptr inbounds nuw i8, ptr %src_val, i64 1
+ %dst_val.next = getelementptr inbounds nuw i8, ptr %dst_val, i64 1
+ store ptr %dst_val.next, ptr %dst, align 8
+ %val = load i8, ptr %src_val, align 1
+ store i8 %val, ptr %dst_val, align 1
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+; A memcpy-like loop where both the source and destination pointers are loaded from pointers
+; FIXME: We could hoist the loads of both addresses, but doing the bounds check
+; of the store through the destination address itself requires using the hoisted
+; load.
+define void @memcpy_load_src_dst(ptr %dst, ptr %src, i32 %n) {
+; CHECK-LABEL: define void @memcpy_load_src_dst(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[N_VAL:%.*]] = phi i32 [ [[DEC:%.*]], %[[WHILE_BODY]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[DEC]] = add nsw i32 [[N_VAL]], -1
+; CHECK-NEXT: [[SRC_VAL:%.*]] = load ptr, ptr [[SRC]], align 8
+; CHECK-NEXT: [[DST_VAL:%.*]] = load ptr, ptr [[DST]], align 8
+; CHECK-NEXT: [[SRC_VAL_NEXT:%.*]] = getelementptr inbounds nuw i8, ptr [[SRC_VAL]], i64 1
+; CHECK-NEXT: [[DST_VAL_NEXT:%.*]] = getelementptr inbounds nuw i8, ptr [[DST_VAL]], i64 1
+; CHECK-NEXT: store ptr [[SRC_VAL_NEXT]], ptr [[SRC]], align 8
+; CHECK-NEXT: store ptr [[DST_VAL_NEXT]], ptr [[DST]], align 8
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[SRC_VAL]], align 1
+; CHECK-NEXT: store i8 [[VAL]], ptr [[DST_VAL]], align 1
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[WHILE_END:.*]], label %[[WHILE_BODY]]
+; CHECK: [[WHILE_END]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.body
+
+while.body:
+ %n_val = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %dec = add nsw i32 %n_val, -1
+ %src_val = load ptr, ptr %src, align 8
+ %dst_val = load ptr, ptr %dst, align 8
+ %src_val.next = getelementptr inbounds nuw i8, ptr %src_val, i64 1
+ %dst_val.next = getelementptr inbounds nuw i8, ptr %dst_val, i64 1
+ store ptr %src_val.next, ptr %src, align 8
+ store ptr %dst_val.next, ptr %dst, align 8
+ %val = load i8, ptr %src_val, align 1
+ store i8 %val, ptr %dst_val, align 1
+ %tobool.not = icmp eq i32 %dec, 0
+ br i1 %tobool.not, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll b/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
index 3f9bd93..b4f8abb 100644
--- a/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
+++ b/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
@@ -57,23 +57,23 @@ define i32 @foo(ptr nocapture %var1, ptr nocapture readnone %var2, ptr nocapture
; CHECK-NEXT: [[CMP2_LVER_ORIG:%.*]] = icmp ult i32 [[INC_LVER_ORIG]], [[ITR]]
; CHECK-NEXT: br i1 [[CMP2_LVER_ORIG]], label [[FOR_BODY3_LVER_ORIG]], label [[FOR_INC11_LOOPEXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: for.body3.ph:
-; CHECK-NEXT: [[ARRAYIDX7_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4, !alias.scope [[META2:![0-9]+]], !noalias [[META2]]
+; CHECK-NEXT: [[ARRAYIDX7_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4, !alias.scope [[META2:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY3:%.*]]
; CHECK: for.body3:
; CHECK-NEXT: [[ADD86:%.*]] = phi i32 [ [[ARRAYIDX7_PROMOTED]], [[FOR_BODY3_PH]] ], [ [[ADD8:%.*]], [[FOR_BODY3]] ]
; CHECK-NEXT: [[J_113:%.*]] = phi i32 [ [[J_016]], [[FOR_BODY3_PH]] ], [ [[INC:%.*]], [[FOR_BODY3]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[J_113]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[VAR1]], i64 [[IDXPROM]]
-; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4, !alias.scope [[META2]], !noalias [[META2]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4, !alias.scope [[META5:![0-9]+]], !noalias [[META2]]
; CHECK-NEXT: [[ADD8]] = add nsw i32 [[ADD86]], [[ADD]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J_113]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[INC]], [[ITR]]
-; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_INC11_LOOPEXIT_LOOPEXIT5:%.*]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_INC11_LOOPEXIT_LOOPEXIT5:%.*]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.inc11.loopexit.loopexit:
; CHECK-NEXT: br label [[FOR_INC11_LOOPEXIT:%.*]]
; CHECK: for.inc11.loopexit.loopexit5:
; CHECK-NEXT: [[ADD8_LCSSA:%.*]] = phi i32 [ [[ADD8]], [[FOR_BODY3]] ]
-; CHECK-NEXT: store i32 [[ADD8_LCSSA]], ptr [[ARRAYIDX7]], align 4, !alias.scope [[META2]], !noalias [[META2]]
+; CHECK-NEXT: store i32 [[ADD8_LCSSA]], ptr [[ARRAYIDX7]], align 4, !alias.scope [[META2]]
; CHECK-NEXT: br label [[FOR_INC11_LOOPEXIT]]
; CHECK: for.inc11.loopexit:
; CHECK-NEXT: br label [[FOR_INC11]]
diff --git a/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll b/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll
index 22ca534..a31da2a 100644
--- a/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll
+++ b/llvm/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll
@@ -9,7 +9,7 @@
;
; CHECK: for.cond1.for.inc17_crit_edge.us.loopexit5: ; preds = %for.body3.us
; CHECK-NEXT: %add14.us.lcssa = phi float [ %add14.us, %for.body3.us ]
-; CHECK-NEXT: store float %add14.us.lcssa, ptr %arrayidx.us, align 4, !alias.scope !0, !noalias !0
+; CHECK-NEXT: store float %add14.us.lcssa, ptr %arrayidx.us, align 4, !alias.scope !3
; CHECK-NEXT: br label %for.cond1.for.inc17_crit_edge.us
;
define i32 @foo(ptr nocapture %var2, ptr nocapture readonly %var3, i32 %itr) #0 {
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll
new file mode 100644
index 0000000..415089a
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64 < %s | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v2 < %s | FileCheck %s --check-prefixes=CHECK,SSE,SSE4
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v3 < %s | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v4 < %s | FileCheck %s --check-prefixes=CHECK,AVX
+
+%"struct.std::array" = type { [8 x i16] }
+
+define { i64, i64 } @compute_min(ptr noundef nonnull align 2 dereferenceable(16) %x, ptr noundef nonnull align 2 dereferenceable(16) %y) {
+; SSE2-LABEL: @compute_min(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: [[LD0:%.*]] = load i16, ptr [[Y:%.*]], align 2
+; SSE2-NEXT: [[LD1:%.*]] = load i16, ptr [[X:%.*]], align 2
+; SSE2-NEXT: [[LD2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0]], i16 [[LD1]])
+; SSE2-NEXT: [[PT1_1:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 2
+; SSE2-NEXT: [[PT0_1:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 2
+; SSE2-NEXT: [[LD0_1:%.*]] = load i16, ptr [[PT0_1]], align 2
+; SSE2-NEXT: [[LD1_1:%.*]] = load i16, ptr [[PT1_1]], align 2
+; SSE2-NEXT: [[LD2_1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_1]], i16 [[LD1_1]])
+; SSE2-NEXT: [[PT1_2:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 4
+; SSE2-NEXT: [[PT0_2:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 4
+; SSE2-NEXT: [[LD0_2:%.*]] = load i16, ptr [[PT0_2]], align 2
+; SSE2-NEXT: [[LD1_2:%.*]] = load i16, ptr [[PT1_2]], align 2
+; SSE2-NEXT: [[LD2_2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_2]], i16 [[LD1_2]])
+; SSE2-NEXT: [[PT1_3:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 6
+; SSE2-NEXT: [[PT0_3:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 6
+; SSE2-NEXT: [[LD0_3:%.*]] = load i16, ptr [[PT0_3]], align 2
+; SSE2-NEXT: [[LD1_3:%.*]] = load i16, ptr [[PT1_3]], align 2
+; SSE2-NEXT: [[LD2_3:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_3]], i16 [[LD1_3]])
+; SSE2-NEXT: [[PT1_4:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 8
+; SSE2-NEXT: [[PT0_4:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 8
+; SSE2-NEXT: [[LD0_4:%.*]] = load i16, ptr [[PT0_4]], align 2
+; SSE2-NEXT: [[LD1_4:%.*]] = load i16, ptr [[PT1_4]], align 2
+; SSE2-NEXT: [[LD2_4:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_4]], i16 [[LD1_4]])
+; SSE2-NEXT: [[PT1_5:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 10
+; SSE2-NEXT: [[PT0_5:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 10
+; SSE2-NEXT: [[LD0_5:%.*]] = load i16, ptr [[PT0_5]], align 2
+; SSE2-NEXT: [[LD1_5:%.*]] = load i16, ptr [[PT1_5]], align 2
+; SSE2-NEXT: [[LD2_5:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_5]], i16 [[LD1_5]])
+; SSE2-NEXT: [[PT1_6:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 12
+; SSE2-NEXT: [[PT0_6:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 12
+; SSE2-NEXT: [[LD0_6:%.*]] = load i16, ptr [[PT0_6]], align 2
+; SSE2-NEXT: [[LD1_6:%.*]] = load i16, ptr [[PT1_6]], align 2
+; SSE2-NEXT: [[LD2_6:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_6]], i16 [[LD1_6]])
+; SSE2-NEXT: [[PT1_7:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 14
+; SSE2-NEXT: [[PT0_7:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 14
+; SSE2-NEXT: [[LD0_7:%.*]] = load i16, ptr [[PT0_7]], align 2
+; SSE2-NEXT: [[LD1_7:%.*]] = load i16, ptr [[PT1_7]], align 2
+; SSE2-NEXT: [[LD2_7:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_7]], i16 [[LD1_7]])
+; SSE2-NEXT: [[RETVAL_SROA_4_0_INSERT_EXT:%.*]] = zext i16 [[LD2_3]] to i64
+; SSE2-NEXT: [[RETVAL_SROA_4_0_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_4_0_INSERT_EXT]], 48
+; SSE2-NEXT: [[RETVAL_SROA_3_0_INSERT_EXT:%.*]] = zext i16 [[LD2_2]] to i64
+; SSE2-NEXT: [[RETVAL_SROA_3_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_3_0_INSERT_EXT]], 32
+; SSE2-NEXT: [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_4_0_INSERT_SHIFT]], [[RETVAL_SROA_3_0_INSERT_SHIFT]]
+; SSE2-NEXT: [[RETVAL_SROA_2_0_INSERT_EXT:%.*]] = zext i16 [[LD2_1]] to i64
+; SSE2-NEXT: [[RETVAL_SROA_2_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_2_0_INSERT_EXT]], 16
+; SSE2-NEXT: [[RETVAL_SROA_2_0_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], [[RETVAL_SROA_2_0_INSERT_SHIFT]]
+; SSE2-NEXT: [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[LD2]] to i64
+; SSE2-NEXT: [[TMP20:%.*]] = or disjoint i64 [[RETVAL_SROA_2_0_INSERT_INSERT]], [[RETVAL_SROA_0_0_INSERT_EXT]]
+; SSE2-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0
+; SSE2-NEXT: [[RETVAL_SROA_9_8_INSERT_EXT:%.*]] = zext i16 [[LD2_7]] to i64
+; SSE2-NEXT: [[RETVAL_SROA_9_8_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_9_8_INSERT_EXT]], 48
+; SSE2-NEXT: [[RETVAL_SROA_8_8_INSERT_EXT:%.*]] = zext i16 [[LD2_6]] to i64
+; SSE2-NEXT: [[RETVAL_SROA_8_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_8_8_INSERT_EXT]], 32
+; SSE2-NEXT: [[RETVAL_SROA_8_8_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_9_8_INSERT_SHIFT]], [[RETVAL_SROA_8_8_INSERT_SHIFT]]
+; SSE2-NEXT: [[RETVAL_SROA_7_8_INSERT_EXT:%.*]] = zext i16 [[LD2_5]] to i64
+; SSE2-NEXT: [[RETVAL_SROA_7_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_7_8_INSERT_EXT]], 16
+; SSE2-NEXT: [[RETVAL_SROA_7_8_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_8_8_INSERT_INSERT]], [[RETVAL_SROA_7_8_INSERT_SHIFT]]
+; SSE2-NEXT: [[RETVAL_SROA_5_8_INSERT_EXT:%.*]] = zext i16 [[LD2_4]] to i64
+; SSE2-NEXT: [[TMP21:%.*]] = or disjoint i64 [[RETVAL_SROA_7_8_INSERT_INSERT]], [[RETVAL_SROA_5_8_INSERT_EXT]]
+; SSE2-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1
+; SSE2-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]]
+;
+; SSE4-LABEL: @compute_min(
+; SSE4-NEXT: entry:
+; SSE4-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Y:%.*]], align 2
+; SSE4-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2
+; SSE4-NEXT: [[TMP2:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> poison, <2 x i32> <i32 0, i32 4>
+; SSE4-NEXT: [[TMP4:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT: [[TMP5:%.*]] = shufflevector <8 x i16> [[TMP4]], <8 x i16> poison, <2 x i32> <i32 1, i32 5>
+; SSE4-NEXT: [[TMP6:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT: [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> poison, <2 x i32> <i32 3, i32 6>
+; SSE4-NEXT: [[TMP8:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT: [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP8]], <8 x i16> poison, <2 x i32> <i32 2, i32 7>
+; SSE4-NEXT: [[TMP10:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i64>
+; SSE4-NEXT: [[TMP11:%.*]] = shl nuw <2 x i64> [[TMP10]], <i64 32, i64 48>
+; SSE4-NEXT: [[TMP12:%.*]] = zext <2 x i16> [[TMP7]] to <2 x i64>
+; SSE4-NEXT: [[TMP13:%.*]] = shl nuw <2 x i64> [[TMP12]], <i64 48, i64 32>
+; SSE4-NEXT: [[TMP14:%.*]] = or disjoint <2 x i64> [[TMP11]], [[TMP13]]
+; SSE4-NEXT: [[TMP15:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i64>
+; SSE4-NEXT: [[TMP16:%.*]] = shl nuw nsw <2 x i64> [[TMP15]], splat (i64 16)
+; SSE4-NEXT: [[TMP17:%.*]] = or disjoint <2 x i64> [[TMP14]], [[TMP16]]
+; SSE4-NEXT: [[TMP18:%.*]] = zext <2 x i16> [[TMP3]] to <2 x i64>
+; SSE4-NEXT: [[TMP19:%.*]] = or disjoint <2 x i64> [[TMP17]], [[TMP18]]
+; SSE4-NEXT: [[TMP20:%.*]] = extractelement <2 x i64> [[TMP19]], i64 0
+; SSE4-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0
+; SSE4-NEXT: [[TMP21:%.*]] = extractelement <2 x i64> [[TMP19]], i64 1
+; SSE4-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1
+; SSE4-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]]
+;
+; AVX-LABEL: @compute_min(
+; AVX-NEXT: entry:
+; AVX-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Y:%.*]], align 2
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2
+; AVX-NEXT: [[TMP2:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> poison, <2 x i32> <i32 0, i32 4>
+; AVX-NEXT: [[TMP4:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT: [[TMP5:%.*]] = shufflevector <8 x i16> [[TMP4]], <8 x i16> poison, <2 x i32> <i32 1, i32 5>
+; AVX-NEXT: [[TMP6:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT: [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> poison, <2 x i32> <i32 3, i32 6>
+; AVX-NEXT: [[TMP8:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT: [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP8]], <8 x i16> poison, <2 x i32> <i32 2, i32 7>
+; AVX-NEXT: [[TMP10:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i64>
+; AVX-NEXT: [[TMP11:%.*]] = shl nuw <2 x i64> [[TMP10]], <i64 32, i64 48>
+; AVX-NEXT: [[TMP12:%.*]] = zext <2 x i16> [[TMP7]] to <2 x i64>
+; AVX-NEXT: [[TMP13:%.*]] = shl nuw <2 x i64> [[TMP12]], <i64 48, i64 32>
+; AVX-NEXT: [[TMP14:%.*]] = or disjoint <2 x i64> [[TMP11]], [[TMP13]]
+; AVX-NEXT: [[TMP15:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i64>
+; AVX-NEXT: [[TMP16:%.*]] = shl nuw nsw <2 x i64> [[TMP15]], splat (i64 16)
+; AVX-NEXT: [[TMP17:%.*]] = or disjoint <2 x i64> [[TMP14]], [[TMP16]]
+; AVX-NEXT: [[TMP18:%.*]] = zext <2 x i16> [[TMP3]] to <2 x i64>
+; AVX-NEXT: [[TMP19:%.*]] = or disjoint <2 x i64> [[TMP17]], [[TMP18]]
+; AVX-NEXT: [[TMP20:%.*]] = extractelement <2 x i64> [[TMP19]], i64 0
+; AVX-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0
+; AVX-NEXT: [[TMP21:%.*]] = extractelement <2 x i64> [[TMP19]], i64 1
+; AVX-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1
+; AVX-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]]
+;
+entry:
+ %retval = alloca %"struct.std::array", align 2
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %cmp.not = icmp eq i32 %i.0, 8
+ br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.cond
+ %.fca.0.load = load i64, ptr %retval, align 2
+ %.fca.0.insert = insertvalue { i64, i64 } poison, i64 %.fca.0.load, 0
+ %.fca.1.gep = getelementptr inbounds nuw i8, ptr %retval, i64 8
+ %.fca.1.load = load i64, ptr %.fca.1.gep, align 2
+ %.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %.fca.1.load, 1
+ ret { i64, i64 } %.fca.1.insert
+
+for.body: ; preds = %for.cond
+ %conv = zext nneg i32 %i.0 to i64
+ %pt1 = getelementptr inbounds nuw [8 x i16], ptr %x, i64 0, i64 %conv
+ %pt0 = getelementptr inbounds nuw [8 x i16], ptr %y, i64 0, i64 %conv
+ %ld0 = load i16, ptr %pt0, align 2
+ %ld1 = load i16, ptr %pt1, align 2
+ %cmp.i = icmp slt i16 %ld0, %ld1
+ %sel = select i1 %cmp.i, ptr %pt0, ptr %pt1
+ %ld2 = load i16, ptr %sel, align 2
+ %pt2 = getelementptr inbounds nuw [8 x i16], ptr %retval, i64 0, i64 %conv
+ store i16 %ld2, ptr %pt2, align 2
+ %inc = add nuw nsw i32 %i.0, 1
+ br label %for.cond
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
+; SSE: {{.*}}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reordered-interleaved-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reordered-interleaved-loads.ll
new file mode 100644
index 0000000..a4b3c8e
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reordered-interleaved-loads.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v %s | FileCheck %s
+
+@h = external global [21 x i16]
+@a = external global [21 x [21 x i16]]
+
+define i1 @test(i32 %conv15.12, i16 %0, ptr %1, i16 %2, i16 %3, i16 %4, i16 %5, i32 %conv15.1.3, i16 %6, i32 %conv15.1.4) {
+; CHECK-LABEL: define i1 @test(
+; CHECK-SAME: i32 [[CONV15_12:%.*]], i16 [[TMP0:%.*]], ptr [[TMP1:%.*]], i16 [[TMP2:%.*]], i16 [[TMP3:%.*]], i16 [[TMP4:%.*]], i16 [[TMP5:%.*]], i32 [[CONV15_1_3:%.*]], i16 [[TMP6:%.*]], i32 [[CONV15_1_4:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[TMP1]], align 2
+; CHECK-NEXT: [[TMP8:%.*]] = load i16, ptr @h, align 2
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i16> poison, i16 [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i16> [[TMP9]], i16 [[TMP0]], i32 2
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i16> [[TMP10]], i16 [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i16> [[TMP11]], i16 [[TMP7]], i32 3
+; CHECK-NEXT: [[TMP13:%.*]] = sext <4 x i16> [[TMP12]] to <4 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[TMP13]], <4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq <4 x i16> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> splat (i32 8), <4 x i32> [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[CONV15_12]], i32 3
+; CHECK-NEXT: [[TMP18:%.*]] = xor <4 x i32> [[TMP16]], [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = icmp sgt <4 x i32> [[TMP14]], [[TMP18]]
+; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x i1> [[TMP19]], i32 3
+; CHECK-NEXT: [[CONV30_18:%.*]] = zext i1 [[TMP29]] to i16
+; CHECK-NEXT: store i16 [[CONV30_18]], ptr @a, align 2
+; CHECK-NEXT: [[TMP30:%.*]] = extractelement <4 x i1> [[TMP19]], i32 2
+; CHECK-NEXT: [[CONV30_219:%.*]] = zext i1 [[TMP30]] to i16
+; CHECK-NEXT: store i16 [[CONV30_219]], ptr @a, align 2
+; CHECK-NEXT: [[TMP31:%.*]] = extractelement <4 x i1> [[TMP19]], i32 1
+; CHECK-NEXT: [[CONV30_330:%.*]] = zext i1 [[TMP31]] to i16
+; CHECK-NEXT: store i16 [[CONV30_330]], ptr @a, align 2
+; CHECK-NEXT: [[TMP32:%.*]] = extractelement <4 x i1> [[TMP19]], i32 0
+; CHECK-NEXT: [[CONV30_4:%.*]] = zext i1 [[TMP32]] to i16
+; CHECK-NEXT: store i16 [[CONV30_4]], ptr @a, align 2
+; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i16> poison, i16 [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i16> [[TMP24]], i16 [[TMP4]], i32 1
+; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i16> [[TMP25]], i16 [[TMP5]], i32 2
+; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i16> [[TMP26]], i16 [[TMP6]], i32 3
+; CHECK-NEXT: [[TMP28:%.*]] = sext <4 x i16> [[TMP27]] to <4 x i32>
+; CHECK-NEXT: [[TMP38:%.*]] = load <16 x i16>, ptr getelementptr inbounds nuw (i8, ptr @h, i64 6), align 2
+; CHECK-NEXT: [[TMP39:%.*]] = shufflevector <16 x i16> [[TMP38]], <16 x i16> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+; CHECK-NEXT: [[TMP40:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[TMP28]], <4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP41:%.*]] = icmp eq <4 x i16> [[TMP39]], zeroinitializer
+; CHECK-NEXT: [[TMP42:%.*]] = select <4 x i1> [[TMP41]], <4 x i32> splat (i32 8), <4 x i32> [[TMP28]]
+; CHECK-NEXT: [[TMP43:%.*]] = insertelement <4 x i32> [[TMP28]], i32 [[CONV15_1_3]], i32 2
+; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i32> [[TMP43]], i32 [[CONV15_1_4]], i32 3
+; CHECK-NEXT: [[TMP45:%.*]] = xor <4 x i32> [[TMP42]], [[TMP44]]
+; CHECK-NEXT: [[TMP37:%.*]] = icmp sgt <4 x i32> [[TMP40]], [[TMP45]]
+; CHECK-NEXT: [[TMP33:%.*]] = extractelement <4 x i1> [[TMP37]], i32 0
+; CHECK-NEXT: [[CONV30_1_1:%.*]] = zext i1 [[TMP33]] to i16
+; CHECK-NEXT: store i16 [[CONV30_1_1]], ptr @a, align 2
+; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[TMP37]], i32 1
+; CHECK-NEXT: [[CONV30_1_2:%.*]] = zext i1 [[TMP34]] to i16
+; CHECK-NEXT: store i16 [[CONV30_1_2]], ptr @a, align 2
+; CHECK-NEXT: [[TMP35:%.*]] = extractelement <4 x i1> [[TMP37]], i32 2
+; CHECK-NEXT: [[CONV30_1_3:%.*]] = zext i1 [[TMP35]] to i16
+; CHECK-NEXT: store i16 [[CONV30_1_3]], ptr @a, align 2
+; CHECK-NEXT: [[TMP36:%.*]] = extractelement <4 x i1> [[TMP37]], i32 3
+; CHECK-NEXT: ret i1 [[TMP36]]
+;
+entry:
+ %7 = load i16, ptr %1, align 2
+ %conv15.121 = sext i16 %7 to i32
+ %cond.13 = tail call i32 @llvm.smax.i32(i32 %conv15.121, i32 0)
+ %tobool.not.14 = icmp eq i16 %7, 0
+ %cond27.15 = select i1 %tobool.not.14, i32 8, i32 %conv15.121
+ %xor.16 = xor i32 %cond27.15, %conv15.12
+ %cmp28.17 = icmp sgt i32 %cond.13, %xor.16
+ %conv30.18 = zext i1 %cmp28.17 to i16
+ store i16 %conv30.18, ptr @a, align 2
+ %conv15.213 = sext i16 %0 to i32
+ %cond.214 = tail call i32 @llvm.smax.i32(i32 %conv15.213, i32 0)
+ %tobool.not.215 = icmp eq i16 %0, 0
+ %cond27.216 = select i1 %tobool.not.215, i32 8, i32 %conv15.213
+ %xor.217 = xor i32 %cond27.216, %conv15.213
+ %cmp28.218 = icmp sgt i32 %cond.214, %xor.217
+ %conv30.219 = zext i1 %cmp28.218 to i16
+ store i16 %conv30.219, ptr @a, align 2
+ %8 = load i16, ptr @h, align 2
+ %conv15.324 = sext i16 %8 to i32
+ %cond.325 = tail call i32 @llvm.smax.i32(i32 %conv15.324, i32 0)
+ %tobool.not.326 = icmp eq i16 %8, 0
+ %cond27.327 = select i1 %tobool.not.326, i32 8, i32 %conv15.324
+ %xor.328 = xor i32 %cond27.327, %conv15.324
+ %cmp28.329 = icmp sgt i32 %cond.325, %xor.328
+ %conv30.330 = zext i1 %cmp28.329 to i16
+ store i16 %conv30.330, ptr @a, align 2
+ %conv15.4 = sext i16 %2 to i32
+ %cond.4 = tail call i32 @llvm.smax.i32(i32 %conv15.4, i32 0)
+ %tobool.not.4 = icmp eq i16 %2, 0
+ %cond27.4 = select i1 %tobool.not.4, i32 8, i32 %conv15.4
+ %xor.4 = xor i32 %cond27.4, %conv15.4
+ %cmp28.4 = icmp sgt i32 %cond.4, %xor.4
+ %conv30.4 = zext i1 %cmp28.4 to i16
+ store i16 %conv30.4, ptr @a, align 2
+ %9 = load i16, ptr getelementptr inbounds nuw (i8, ptr @h, i64 6), align 2
+ %conv15.1.1 = sext i16 %3 to i32
+ %cond.1.1 = tail call i32 @llvm.smax.i32(i32 %conv15.1.1, i32 0)
+ %tobool.not.1.1 = icmp eq i16 %9, 0
+ %cond27.1.1 = select i1 %tobool.not.1.1, i32 8, i32 %conv15.1.1
+ %xor.1.1 = xor i32 %cond27.1.1, %conv15.1.1
+ %cmp28.1.1 = icmp sgt i32 %cond.1.1, %xor.1.1
+ %conv30.1.1 = zext i1 %cmp28.1.1 to i16
+ store i16 %conv30.1.1, ptr @a, align 2
+ %10 = load i16, ptr getelementptr inbounds nuw (i8, ptr @h, i64 12), align 4
+ %conv15.1.2 = sext i16 %4 to i32
+ %cond.1.2 = tail call i32 @llvm.smax.i32(i32 %conv15.1.2, i32 0)
+ %tobool.not.1.2 = icmp eq i16 %10, 0
+ %cond27.1.2 = select i1 %tobool.not.1.2, i32 8, i32 %conv15.1.2
+ %xor.1.2 = xor i32 %cond27.1.2, %conv15.1.2
+ %cmp28.1.2 = icmp sgt i32 %cond.1.2, %xor.1.2
+ %conv30.1.2 = zext i1 %cmp28.1.2 to i16
+ store i16 %conv30.1.2, ptr @a, align 2
+ %11 = load i16, ptr getelementptr inbounds nuw (i8, ptr @h, i64 18), align 2
+ %conv15.1.32 = sext i16 %5 to i32
+ %cond.1.3 = tail call i32 @llvm.smax.i32(i32 %conv15.1.32, i32 0)
+ %tobool.not.1.3 = icmp eq i16 %11, 0
+ %cond27.1.3 = select i1 %tobool.not.1.3, i32 8, i32 %conv15.1.32
+ %xor.1.3 = xor i32 %cond27.1.3, %conv15.1.3
+ %cmp28.1.3 = icmp sgt i32 %cond.1.3, %xor.1.3
+ %conv30.1.3 = zext i1 %cmp28.1.3 to i16
+ store i16 %conv30.1.3, ptr @a, align 2
+ %12 = load i16, ptr getelementptr inbounds nuw (i8, ptr @h, i64 24), align 8
+ %conv15.1.43 = sext i16 %6 to i32
+ %cond.1.4 = tail call i32 @llvm.smax.i32(i32 %conv15.1.43, i32 0)
+ %tobool.not.1.4 = icmp eq i16 %12, 0
+ %cond27.1.4 = select i1 %tobool.not.1.4, i32 8, i32 %conv15.1.43
+ %xor.1.4 = xor i32 %cond27.1.4, %conv15.1.4
+ %cmp28.1.4 = icmp sgt i32 %cond.1.4, %xor.1.4
+ ret i1 %cmp28.1.4
+}
+
+declare i32 @llvm.smax.i32(i32, i32)
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/unordered-loads-operands.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/unordered-loads-operands.ll
new file mode 100644
index 0000000..1b65a7a
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/unordered-loads-operands.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v -slp-threshold=-1000 %s | FileCheck %s
+
+define void @test(ptr %mdct_forward_x) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[MDCT_FORWARD_X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[FOR_COND:.*]]
+; CHECK: [[FOR_COND]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[MDCT_FORWARD_X]], align 8
+; CHECK-NEXT: [[ARRAYIDX2_I_I:%.*]] = getelementptr i8, ptr [[TMP0]], i64 32
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i8, ptr [[TMP0]], i64 24
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x ptr> [[TMP1]], <4 x ptr> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <4 x ptr> [[TMP2]], <4 x i64> <i64 28, i64 36, i64 24, i64 28>
+; CHECK-NEXT: [[TMP4:%.*]] = call <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i64(ptr align 4 [[ARRAYIDX2_I_I]], i64 -8, <2 x i1> splat (i1 true), i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x ptr> [[TMP2]], <4 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 48, i64 40>
+; CHECK-NEXT: [[TMP7:%.*]] = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> [[TMP6]], i32 4, <2 x i1> splat (i1 true), <2 x float> poison)
+; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> [[TMP3]], i32 4, <4 x i1> splat (i1 true), <4 x float> poison)
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP7]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 0>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x float> <float poison, float poison, float 0.000000e+00, float poison>, <4 x float> [[TMP10]], <4 x i32> <i32 poison, i32 poison, i32 2, i32 4>
+; CHECK-NEXT: [[TMP12:%.*]] = call <4 x float> @llvm.vector.insert.v4f32.v2f32(<4 x float> [[TMP11]], <2 x float> [[TMP4]], i64 0)
+; CHECK-NEXT: [[TMP13:%.*]] = fsub <4 x float> [[TMP9]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = fadd <4 x float> [[TMP9]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <4 x float> [[TMP13]], <4 x float> [[TMP14]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT: [[TMP16:%.*]] = fsub <4 x float> zeroinitializer, [[TMP8]]
+; CHECK-NEXT: [[TMP17:%.*]] = fadd <4 x float> zeroinitializer, [[TMP8]]
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <4 x float> [[TMP16]], <4 x float> [[TMP17]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT: store float 0.000000e+00, ptr [[ADD_PTR_I]], align 4
+; CHECK-NEXT: [[TMP19:%.*]] = fsub <4 x float> [[TMP15]], [[TMP18]]
+; CHECK-NEXT: [[TMP20:%.*]] = fadd <4 x float> [[TMP15]], [[TMP18]]
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <4 x float> [[TMP19]], <4 x float> [[TMP20]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+; CHECK-NEXT: store <4 x float> [[TMP21]], ptr [[ARRAYIDX2_I_I]], align 4
+; CHECK-NEXT: br label %[[FOR_COND]]
+;
+entry:
+ br label %for.cond
+
+for.cond:
+ %0 = load ptr, ptr %mdct_forward_x, align 8
+ %add.ptr.i = getelementptr i8, ptr %0, i64 24
+ %arrayidx.i.i = getelementptr i8, ptr %0, i64 48
+ %1 = load float, ptr %arrayidx.i.i, align 4
+ %add.i.i = fadd float %1, 0.000000e+00
+ %arrayidx2.i.i = getelementptr i8, ptr %0, i64 32
+ %2 = load float, ptr %arrayidx2.i.i, align 4
+ %sub.i.i = fsub float %1, %2
+ %3 = load float, ptr %add.ptr.i, align 4
+ %add4.i.i = fadd float %3, 0.000000e+00
+ %arrayidx5.i.i = getelementptr i8, ptr %0, i64 40
+ %4 = load float, ptr %arrayidx5.i.i, align 4
+ %sub7.i.i = fsub float %4, %3
+ %sub8.i.i = fsub float %add.i.i, %add4.i.i
+ store float %sub8.i.i, ptr %arrayidx5.i.i, align 4
+ %arrayidx10.i.i = getelementptr i8, ptr %0, i64 28
+ %5 = load float, ptr %arrayidx10.i.i, align 4
+ %sub11.i.i = fsub float 0.000000e+00, %5
+ %arrayidx12.i.i = getelementptr i8, ptr %0, i64 36
+ %6 = load float, ptr %arrayidx12.i.i, align 4
+ %sub13.i.i = fsub float 0.000000e+00, %6
+ store float 0.000000e+00, ptr %add.ptr.i, align 4
+ %sub15.i.i = fsub float %sub.i.i, %sub11.i.i
+ store float %sub15.i.i, ptr %arrayidx2.i.i, align 4
+ %add17.i.i = fadd float %sub7.i.i, %sub13.i.i
+ store float %add17.i.i, ptr %arrayidx12.i.i, align 4
+ %arrayidx20.i.i = getelementptr i8, ptr %0, i64 44
+ store float %sub15.i.i, ptr %arrayidx20.i.i, align 4
+ br label %for.cond
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/long-pointer-distance.ll b/llvm/test/Transforms/SLPVectorizer/X86/long-pointer-distance.ll
new file mode 100644
index 0000000..9cfafd2
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/long-pointer-distance.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-grtev4-linux-gnu < %s -mattr=+avx | FileCheck %s
+
+define void @test(ptr %this) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[THIS:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: store <4 x i64> <i64 1, i64 2, i64 3, i64 4>, ptr [[THIS]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ store i64 1, ptr %this, align 8
+ %b = getelementptr i8, ptr %this, i64 8
+ store i64 2, ptr %b, align 8
+ %c = getelementptr i8, ptr %this, i64 u0x100000010
+ store i64 3, ptr %c, align 8
+ %d = getelementptr i8, ptr %this, i64 u0x100000018
+ store i64 4, ptr %d, align 8
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
index fff2b72..fd16a52 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
@@ -4,19 +4,40 @@
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr undef, i64 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [4 x [4 x i32]], ptr undef, i64 0, i64 1, i64 0
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP3]] to <4 x i16>
-; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> zeroinitializer, [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = shl <4 x i16> [[TMP5]], zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i16> [[TMP6]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i16> [[TMP7]], <4 x i16> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i16> [[TMP8]], [[TMP7]]
-; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[TMP8]], [[TMP7]]
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-NEXT: [[TMP13:%.*]] = sub <4 x i16> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP15:%.*]] = sext <4 x i16> [[TMP13]] to <4 x i32>
-; CHECK-NEXT: store <4 x i32> [[TMP15]], ptr [[TMP2]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = sub nsw i32 0, [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = shl nsw i32 [[TMP4]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr undef, i64 5
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = zext i8 [[TMP8]] to i32
+; CHECK-NEXT: [[TMP10:%.*]] = sub nsw i32 0, [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = shl nsw i32 [[TMP10]], 0
+; CHECK-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP11]], 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr undef, i64 6
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = zext i8 [[TMP14]] to i32
+; CHECK-NEXT: [[TMP16:%.*]] = sub nsw i32 0, [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i32 [[TMP16]], 0
+; CHECK-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP17]], 0
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr undef, i64 7
+; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
+; CHECK-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32
+; CHECK-NEXT: [[TMP22:%.*]] = sub nsw i32 0, [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = shl nsw i32 [[TMP22]], 0
+; CHECK-NEXT: [[TMP24:%.*]] = add nsw i32 [[TMP23]], 0
+; CHECK-NEXT: [[TMP25:%.*]] = add nsw i32 [[TMP12]], [[TMP6]]
+; CHECK-NEXT: [[TMP26:%.*]] = sub nsw i32 [[TMP6]], [[TMP12]]
+; CHECK-NEXT: [[TMP27:%.*]] = add nsw i32 [[TMP24]], [[TMP18]]
+; CHECK-NEXT: [[TMP28:%.*]] = sub nsw i32 [[TMP18]], [[TMP24]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x [4 x i32]], ptr undef, i64 0, i64 1, i64 0
+; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i32> <i32 poison, i32 poison, i32 0, i32 0>, i32 [[TMP25]], i32 0
+; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP26]], i32 1
+; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> <i32 0, i32 0, i32 poison, i32 poison>, i32 [[TMP27]], i32 2
+; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP28]], i32 3
+; CHECK-NEXT: [[TMP34:%.*]] = sub nsw <4 x i32> [[TMP31]], [[TMP33]]
+; CHECK-NEXT: store <4 x i32> [[TMP34]], ptr [[TMP29]], align 16
; CHECK-NEXT: ret void
;
%1 = getelementptr inbounds i8, ptr undef, i64 4
diff --git a/llvm/test/Transforms/SimplifyCFG/hoist-sink-swifterror-store.ll b/llvm/test/Transforms/SimplifyCFG/hoist-sink-swifterror-store.ll
index 0c13f57..5dff39c 100644
--- a/llvm/test/Transforms/SimplifyCFG/hoist-sink-swifterror-store.ll
+++ b/llvm/test/Transforms/SimplifyCFG/hoist-sink-swifterror-store.ll
@@ -3,6 +3,8 @@
declare void @clobber1()
declare void @clobber2()
+declare swiftcc void @foo(ptr swifterror)
+declare swiftcc void @bar(ptr swifterror, ptr)
; Do not try to sink the stores to the exit block, as this requires introducing
; a select for the pointer operand. This is not allowed for swifterror pointers.
@@ -76,6 +78,22 @@ exit:
; introduces a select for the pointer operand. This is not allowed for
; swifterror pointers.
define swiftcc ptr @sink_load(ptr %arg, ptr swifterror %arg1, i1 %c) {
+; CHECK-LABEL: define swiftcc ptr @sink_load
+; CHECK-SAME: (ptr [[ARG:%.*]], ptr swifterror [[ARG1:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 [[C]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: call void @clobber1()
+; CHECK-NEXT: [[L1:%.*]] = load ptr, ptr [[ARG]], align 8
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: else:
+; CHECK-NEXT: call void @clobber2()
+; CHECK-NEXT: [[L2:%.*]] = load ptr, ptr [[ARG1]], align 8
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[L1]], [[THEN]] ], [ [[L2]], [[ELSE]] ]
+; CHECK-NEXT: ret ptr [[P]]
+;
bb:
br i1 %c, label %then, label %else
@@ -127,3 +145,77 @@ exit:
%p = phi ptr [ %l1, %then ], [ %l2, %else ]
ret ptr %p
}
+
+
+define swiftcc void @sink_call(i1 %c) {
+; CHECK-LABEL: define swiftcc void @sink_call
+; CHECK-SAME: (i1 [[C:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca swifterror ptr, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = alloca swifterror ptr, align 8
+; CHECK-NEXT: br i1 [[C]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: call void @clobber1()
+; CHECK-NEXT: call swiftcc void @foo(ptr [[TMP2]])
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: else:
+; CHECK-NEXT: call void @clobber2()
+; CHECK-NEXT: call swiftcc void @foo(ptr [[TMP1]])
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+ %2 = alloca swifterror ptr, align 8
+ %3 = alloca swifterror ptr, align 8
+ br i1 %c, label %then, label %else
+
+then:
+ call void @clobber1()
+ call swiftcc void @foo(ptr %3)
+ br label %exit
+
+else:
+ call void @clobber2()
+ call swiftcc void @foo(ptr %2)
+ br label %exit
+
+exit:
+ ret void
+}
+
+
+define swiftcc void @safe_sink_call(i1 %c) {
+; CHECK-LABEL: define swiftcc void @safe_sink_call
+; CHECK-SAME: (i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ERR:%.*]] = alloca swifterror ptr, align 8
+; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[B:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: br i1 [[C]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: call void @clobber1()
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: else:
+; CHECK-NEXT: call void @clobber2()
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[B_SINK:%.*]] = phi ptr [ [[B]], [[ELSE]] ], [ [[A]], [[THEN]] ]
+; CHECK-NEXT: call swiftcc void @bar(ptr [[ERR]], ptr [[B_SINK]])
+; CHECK-NEXT: ret void
+;
+ %err = alloca swifterror ptr, align 8
+ %a = alloca ptr, align 8
+ %b = alloca ptr, align 8
+ br i1 %c, label %then, label %else
+
+then:
+ call void @clobber1()
+ call swiftcc void @bar(ptr %err, ptr %a)
+ br label %exit
+
+else:
+ call void @clobber2()
+ call swiftcc void @bar(ptr %err, ptr %b)
+ br label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/tools/dsymutil/ARM/swiftmodule.test b/llvm/test/tools/dsymutil/ARM/swiftmodule.test
index 347f284..31b3926 100644
--- a/llvm/test/tools/dsymutil/ARM/swiftmodule.test
+++ b/llvm/test/tools/dsymutil/ARM/swiftmodule.test
@@ -13,6 +13,7 @@
#
# CHECK-NOT: Skipping compiled textual Swift interface: {{.*}}/Inputs/Binary.swiftmodule
# CHECK: Skipping compiled textual Swift interface: {{.*}}/Inputs/FromInterface.swiftmodule
+# CHECK-NOT: Skipping compiled textual Swift interface: {{.*}}/Inputs/FromInterface.swiftmodule
#
---
@@ -26,4 +27,8 @@ objects:
timestamp: 0
type: 50
symbols: []
+ - filename: '../Inputs/FromInterface.swiftmodule'
+ timestamp: 0
+ type: 50
+ symbols: []
...
diff --git a/llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe b/llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe
new file mode 100755
index 0000000..f6f013b
--- /dev/null
+++ b/llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe
Binary files differ
diff --git a/llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe.gsym b/llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe.gsym
new file mode 100644
index 0000000..a46f78b
--- /dev/null
+++ b/llvm/test/tools/llvm-symbolizer/Inputs/addr-gsymonly.exe.gsym
Binary files differ
diff --git a/llvm/test/tools/llvm-symbolizer/sym-gsymonly.test b/llvm/test/tools/llvm-symbolizer/sym-gsymonly.test
new file mode 100644
index 0000000..0d00c00
--- /dev/null
+++ b/llvm/test/tools/llvm-symbolizer/sym-gsymonly.test
@@ -0,0 +1,93 @@
+# This test is a variant of sym.test. It uses a binary without DWARF debug
+# info, but a corresponding .gsym file. The expectations are the same, except
+# for the fact that GSYM doesn't provide us with column numbers.
+#
+# Source:
+# #include <stdio.h>
+# static inline int inctwo (int *a) {
+# printf ("%d\n",(*a)++);
+# return (*a)++;
+# }
+# static inline int inc (int *a) {
+# printf ("%d\n",inctwo(a));
+# return (*a)++;
+# }
+#
+#
+# int main () {
+# int x = 1;
+# return inc(&x);
+# }
+#
+# Build as : clang -g -O2 addr.c
+# extract gsym file as : llvm-gsymutil --convert=%p/Inputs/addr.exe --out-file=%p/Inputs/addr-gsymonly.exe.gsym
+# strip debug as : llvm-objcopy --strip-debug %p/Inputs/addr.exe %p/Inputs/addr-gsymonly.exe
+
+
+RUN: llvm-symbolizer --print-address --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck %s
+RUN: llvm-symbolizer --addresses --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck %s
+RUN: llvm-symbolizer -a --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck %s
+
+CHECK: ??:0:0
+CHECK-EMPTY:
+CHECK-NEXT: 0x40054d
+CHECK-NEXT: inctwo
+CHECK-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:3:0
+CHECK-NEXT: inc
+CHECK-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:7:0
+CHECK-NEXT: main
+CHECK-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:14:0
+CHECK-EMPTY:
+CHECK-NEXT: ??
+CHECK-NEXT: ??:0:0
+
+RUN: llvm-symbolizer --inlining --print-address --pretty-print --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+RUN: llvm-symbolizer --inlining --print-address -p --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+RUN: llvm-symbolizer --inlines --print-address --pretty-print --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+RUN: llvm-symbolizer --inlines --print-address -p --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+RUN: llvm-symbolizer -i --print-address --pretty-print --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+RUN: llvm-symbolizer -i --print-address -p --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+
+# Before 2020-08-04, asan_symbolize.py passed --inlining=true.
+# Support this compatibility alias for a while.
+RUN: llvm-symbolizer --inlining=true --print-address -p --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix="PRETTY" %s
+
+PRETTY: ??:0:0
+PRETTY: {{[0x]+}}40054d: inctwo at {{[/\]+}}tmp{{[/\]+}}x.c:3:0
+PRETTY: (inlined by) inc at {{[/\]+}}tmp{{[/\]+}}x.c:7:0
+PRETTY: (inlined by) main at {{[/\]+}}tmp{{[/\]+}}x.c:14:0
+PRETTY: ??:0:0
+
+RUN: llvm-addr2line --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefix=A2L %s
+RUN: llvm-addr2line -a --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2L,A2L_A %s
+RUN: llvm-addr2line -f --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2L,A2L_F %s
+RUN: llvm-addr2line -i --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2L,A2L_I %s
+RUN: llvm-addr2line -fi --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2L,A2L_F,A2L_I,A2L_FI %s
+
+RUN: llvm-addr2line -pa --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2LP,A2LP_A %s
+RUN: llvm-addr2line -pf --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2LP,A2LP_F %s
+RUN: llvm-addr2line -paf --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2LP,A2LP_AF %s
+RUN: llvm-addr2line -pai --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2LP,A2LP_A,A2LP_I %s
+RUN: llvm-addr2line -pfi --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2LP,A2LP_F,A2LP_FI %s
+RUN: llvm-addr2line -pafi --obj=%p/Inputs/addr-gsymonly.exe < %p/Inputs/addr.inp | FileCheck -check-prefixes=A2LP,A2LP_AF,A2LP_FI %s
+
+A2L: ??:0
+A2L_A-NEXT: 0x40054d
+A2L_F-NEXT: inctwo
+A2L-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:3{{$}}
+A2L_FI-NEXT: inc{{$}}
+A2L_I-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:7{{$}}
+A2L_FI-NEXT: main
+A2L_I-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:14{{$}}
+A2L_F-NEXT: ??
+A2L-NEXT: ??:0
+
+A2LP: ??:0
+A2LP_A-NEXT: 0x40054d: {{[/\]+}}tmp{{[/\]+}}x.c:3{{$}}
+A2LP_F-NEXT: inctwo at {{[/\]+}}tmp{{[/\]+}}x.c:3{{$}}
+A2LP_AF-NEXT: 0x40054d: inctwo at {{[/\]+}}tmp{{[/\]+}}x.c:3{{$}}
+A2LP_I-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:7{{$}}
+A2LP_I-NEXT: {{[/\]+}}tmp{{[/\]+}}x.c:14{{$}}
+A2LP_FI-NEXT: (inlined by) inc at {{[/\]+}}tmp{{[/\]+}}x.c:7{{$}}
+A2LP_FI-NEXT: (inlined by) main at {{[/\]+}}tmp{{[/\]+}}x.c:14{{$}}
+A2LP-NEXT: ??:0
diff --git a/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp b/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp
index 9bcc479..e88e076 100644
--- a/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp
+++ b/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/BinaryFormat/MachO.h"
@@ -769,6 +770,7 @@ bool DwarfLinkerForBinary::linkImpl(
MaxDWARFVersion = std::max(Unit.getVersion(), MaxDWARFVersion);
};
+ llvm::StringSet<> SwiftModules;
for (const auto &Obj : Map.objects()) {
// N_AST objects (swiftmodule files) should get dumped directly into the
// appropriate DWARF section.
@@ -777,6 +779,9 @@ bool DwarfLinkerForBinary::linkImpl(
outs() << "DEBUG MAP OBJECT: " << Obj->getObjectFilename() << "\n";
StringRef File = Obj->getObjectFilename();
+ if (!SwiftModules.insert(File).second)
+ continue;
+
auto ErrorOrMem = MemoryBuffer::getFile(File);
if (!ErrorOrMem) {
reportWarning("Could not open '" + File + "'");
diff --git a/llvm/tools/llvm-symbolizer/Opts.td b/llvm/tools/llvm-symbolizer/Opts.td
index d0b227a..10f1e6d 100644
--- a/llvm/tools/llvm-symbolizer/Opts.td
+++ b/llvm/tools/llvm-symbolizer/Opts.td
@@ -16,6 +16,9 @@ class F<string name, string help>: Flag<["--"], name>, HelpText<help>;
def grp_mach_o : OptionGroup<"kind">,
HelpText<"llvm-symbolizer Mach-O Specific Options">;
+def grp_gsym : OptionGroup<"kind">,
+ HelpText<"llvm-symbolizer GSYM Related Options">;
+
def addresses : F<"addresses", "Show address before line information">;
defm adjust_vma
: Eq<"adjust-vma", "Add specified offset to object file addresses">,
@@ -31,9 +34,11 @@ defm default_arch
: Eq<"default-arch", "Default architecture (for multi-arch objects)">,
Group<grp_mach_o>;
defm demangle : B<"demangle", "Demangle function names", "Don't demangle function names">;
+def disable_gsym : F<"disable-gsym", "Don't consider using GSYM files for symbolication">, Group<grp_gsym>;
def filter_markup : Flag<["--"], "filter-markup">, HelpText<"Filter symbolizer markup from stdin.">;
def functions : F<"functions", "Print function name for a given address">;
def functions_EQ : Joined<["--"], "functions=">, HelpText<"Print function name for a given address">, Values<"none,short,linkage">;
+defm gsym_file_directory : Eq<"gsym-file-directory", "Path to directory where to look for GSYM files">, MetaVarName<"<dir>">, Group<grp_gsym>;
def help : F<"help", "Display this help">;
defm dwp : Eq<"dwp", "Path to DWP file to be use for any split CUs">, MetaVarName<"<file>">;
defm dsym_hint
diff --git a/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp b/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp
index 3ba7f59..b80f792 100644
--- a/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp
+++ b/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp
@@ -499,6 +499,8 @@ int llvm_symbolizer_main(int argc, char **argv, const llvm::ToolContext &) {
Opts.DWPName = Args.getLastArgValue(OPT_dwp_EQ).str();
Opts.FallbackDebugPath =
Args.getLastArgValue(OPT_fallback_debug_path_EQ).str();
+ Opts.GsymFileDirectory = Args.getAllArgValues(OPT_gsym_file_directory_EQ);
+ Opts.DisableGsym = Args.hasArg(OPT_disable_gsym);
Opts.PrintFunctions = decideHowToPrintFunctions(Args, IsAddr2Line);
parseIntArg(Args, OPT_print_source_context_lines_EQ,
Config.SourceContextLines);
diff --git a/llvm/unittests/Support/TrailingObjectsTest.cpp b/llvm/unittests/Support/TrailingObjectsTest.cpp
index 6f9d7bd..2590f37 100644
--- a/llvm/unittests/Support/TrailingObjectsTest.cpp
+++ b/llvm/unittests/Support/TrailingObjectsTest.cpp
@@ -17,14 +17,12 @@ namespace {
// This class, beyond being used by the test case, a nice
// demonstration of the intended usage of TrailingObjects, with a
// single trailing array.
-class Class1 final : protected TrailingObjects<Class1, short> {
+class Class1 final : private TrailingObjects<Class1, short> {
friend TrailingObjects;
unsigned NumShorts;
protected:
- size_t numTrailingObjects(OverloadToken<short>) const { return NumShorts; }
-
Class1(ArrayRef<int> ShortArray) : NumShorts(ShortArray.size()) {
// This tests the non-templated getTrailingObjects() that returns a pointer
// when using a single trailing type.
@@ -52,18 +50,15 @@ public:
using TrailingObjects::getTrailingObjects;
};
-// Here, there are two singular optional object types appended. Note
+// Here, there are two singular optional object types appended. Note
// that the alignment of Class2 is automatically increased to account
// for the alignment requirements of the trailing objects.
-class Class2 final : protected TrailingObjects<Class2, double, short> {
+class Class2 final : private TrailingObjects<Class2, double, short> {
friend TrailingObjects;
bool HasShort, HasDouble;
protected:
- size_t numTrailingObjects(OverloadToken<short>) const {
- return HasShort ? 1 : 0;
- }
size_t numTrailingObjects(OverloadToken<double>) const {
return HasDouble ? 1 : 0;
}
@@ -179,14 +174,23 @@ TEST(TrailingObjects, TwoArg) {
}
// This test class is not trying to be a usage demo, just asserting
-// that three args does actually work too (it's the same code as
+// that three args does actually work too (it's the same code that
// handles the second arg, so it's basically covered by the above, but
// just in case..)
-class Class3 final : public TrailingObjects<Class3, double, short, bool> {
+class Class3 final : private TrailingObjects<Class3, double, short, bool> {
friend TrailingObjects;
size_t numTrailingObjects(OverloadToken<double>) const { return 1; }
size_t numTrailingObjects(OverloadToken<short>) const { return 1; }
+
+public:
+ // Pull some protected members in as public, for testability.
+ template <typename... Ty>
+ using FixedSizeStorage = TrailingObjects::FixedSizeStorage<Ty...>;
+
+ using TrailingObjects::additionalSizeToAlloc;
+ using TrailingObjects::getTrailingObjects;
+ using TrailingObjects::totalSizeToAlloc;
};
TEST(TrailingObjects, ThreeArg) {
@@ -216,9 +220,18 @@ TEST(TrailingObjects, ThreeArg) {
1));
}
-class Class4 final : public TrailingObjects<Class4, char, long> {
+class Class4 final : private TrailingObjects<Class4, char, long> {
friend TrailingObjects;
size_t numTrailingObjects(OverloadToken<char>) const { return 1; }
+
+public:
+ // Pull some protected members in as public, for testability.
+ template <typename... Ty>
+ using FixedSizeStorage = TrailingObjects::FixedSizeStorage<Ty...>;
+
+ using TrailingObjects::additionalSizeToAlloc;
+ using TrailingObjects::getTrailingObjects;
+ using TrailingObjects::totalSizeToAlloc;
};
TEST(TrailingObjects, Realignment) {
@@ -255,11 +268,6 @@ class Class5Tmpl : private llvm::TrailingObjects<Derived, float, int> {
typename TrailingObjects::template OverloadToken<float>) const {
return 1;
}
-
- size_t numTrailingObjects(
- typename TrailingObjects::template OverloadToken<int>) const {
- return 2;
- }
};
class Class5 : public Class5Tmpl<Class5> {};
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index b8d33e8..33133d3 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -1128,6 +1128,7 @@ R"(All available -march extensions for RISC-V
svpbmt 1.0
svvptc 1.0
xandesperf 5.0
+ xandesvpackfph 5.0
xcvalu 1.0
xcvbi 1.0
xcvbitmanip 1.0
diff --git a/llvm/unittests/Transforms/Utils/CloningTest.cpp b/llvm/unittests/Transforms/Utils/CloningTest.cpp
index 8f6ec12..09b32bf 100644
--- a/llvm/unittests/Transforms/Utils/CloningTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CloningTest.cpp
@@ -1004,6 +1004,16 @@ protected:
Function::Create(FuncType, GlobalValue::ExternalLinkage, "g", OldM);
G->addMetadata(LLVMContext::MD_type, *MDNode::get(C, {}));
+ auto *NonEntryBlock = BasicBlock::Create(C, "", F);
+ IBuilder.SetInsertPoint(NonEntryBlock);
+ IBuilder.CreateRetVoid();
+
+ // Create a global that contains the block address in its initializer.
+ auto *BlockAddress = BlockAddress::get(NonEntryBlock);
+ new GlobalVariable(*OldM, BlockAddress->getType(), /*isConstant=*/true,
+ GlobalVariable::ExternalLinkage, BlockAddress,
+ "blockaddr");
+
// Finalize the debug info
DBuilder.finalize();
}
@@ -1266,4 +1276,13 @@ TEST_F(CloneInstruction, cloneKeyInstructions) {
#undef EXPECT_ATOM
}
+// Checks that block addresses in global initializers are properly cloned.
+TEST_F(CloneModule, GlobalWithBlockAddressesInitializer) {
+ auto *OriginalBa = cast<BlockAddress>(
+ OldM->getGlobalVariable("blockaddr")->getInitializer());
+ auto *ClonedBa = cast<BlockAddress>(
+ NewM->getGlobalVariable("blockaddr")->getInitializer());
+ ASSERT_NE(OriginalBa->getBasicBlock(), ClonedBa->getBasicBlock());
+}
+
} // namespace
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h b/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h
index d9bd413..2a15e90 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h
+++ b/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h
@@ -13,6 +13,7 @@
#define LLVM_UNITTESTS_TRANSFORMS_VECTORIZE_VPLANTESTBASE_H
#include "../lib/Transforms/Vectorize/VPlan.h"
+#include "../lib/Transforms/Vectorize/VPlanHelpers.h"
#include "../lib/Transforms/Vectorize/VPlanTransforms.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
@@ -72,8 +73,9 @@ protected:
PredicatedScalarEvolution PSE(*SE, *L);
DenseMap<const VPBlockBase *, BasicBlock *> VPB2IRBB;
auto Plan = VPlanTransforms::buildPlainCFG(L, *LI, VPB2IRBB);
+ VFRange R(ElementCount::getFixed(1), ElementCount::getFixed(2));
VPlanTransforms::prepareForVectorization(*Plan, IntegerType::get(*Ctx, 64),
- PSE, true, false, L, {});
+ PSE, true, false, L, {}, false, R);
VPlanTransforms::createLoopRegions(*Plan);
return Plan;
}
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index a3a739e..1e8f3ed 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -1114,7 +1114,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool IsAlias) const {
// Verify that any operand is only mentioned once.
// We reject aliases and ignore instructions for now.
if (!IsAlias && TheDef->getValueAsString("AsmMatchConverter").empty() &&
- Tok[0] == '$' && !OperandNames.insert(std::string(Tok)).second) {
+ Tok[0] == '$' && !OperandNames.insert(Tok.str()).second) {
LLVM_DEBUG({
errs() << "warning: '" << TheDef->getName() << "': "
<< "ignoring instruction with tied operand '" << Tok << "'\n";
@@ -1170,7 +1170,7 @@ static std::string getEnumNameForToken(StringRef Str) {
}
ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
- ClassInfo *&Entry = TokenClasses[std::string(Token)];
+ ClassInfo *&Entry = TokenClasses[Token.str()];
if (!Entry) {
Classes.emplace_front();
@@ -1178,7 +1178,7 @@ ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
Entry->Kind = ClassInfo::Token;
Entry->ClassName = "Token";
Entry->Name = "MCK_" + getEnumNameForToken(Token);
- Entry->ValueName = std::string(Token);
+ Entry->ValueName = Token.str();
Entry->PredicateMethod = "<invalid>";
Entry->RenderMethod = "<invalid>";
Entry->ParserMethod = "";
@@ -1347,16 +1347,17 @@ void AsmMatcherInfo::buildRegisterClasses(
CI->ClassName = RC.getName();
CI->Name = "MCK_" + RC.getName();
CI->ValueName = RC.getName();
- } else
+ } else {
CI->ValueName = CI->ValueName + "," + RC.getName();
+ }
const Init *DiagnosticType = Def->getValueInit("DiagnosticType");
if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
- CI->DiagnosticType = std::string(SI->getValue());
+ CI->DiagnosticType = SI->getValue().str();
const Init *DiagnosticString = Def->getValueInit("DiagnosticString");
if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
- CI->DiagnosticString = std::string(SI->getValue());
+ CI->DiagnosticString = SI->getValue().str();
// If we have a diagnostic string but the diagnostic type is not specified
// explicitly, create an anonymous diagnostic type.
@@ -1376,11 +1377,12 @@ void AsmMatcherInfo::buildRegisterClasses(
assert(CI && "Missing singleton register class info!");
if (CI->ValueName.empty()) {
- CI->ClassName = std::string(Rec->getName());
+ CI->ClassName = Rec->getName().str();
CI->Name = "MCK_" + Rec->getName().str();
- CI->ValueName = std::string(Rec->getName());
- } else
+ CI->ValueName = Rec->getName().str();
+ } else {
CI->ValueName = CI->ValueName + "," + Rec->getName().str();
+ }
}
}
@@ -1413,14 +1415,14 @@ void AsmMatcherInfo::buildOperandClasses() {
else
CI->SuperClasses.push_back(SC);
}
- CI->ClassName = std::string(Rec->getValueAsString("Name"));
+ CI->ClassName = Rec->getValueAsString("Name").str();
CI->Name = "MCK_" + CI->ClassName;
- CI->ValueName = std::string(Rec->getName());
+ CI->ValueName = Rec->getName().str();
// Get or construct the predicate method name.
const Init *PMName = Rec->getValueInit("PredicateMethod");
if (const StringInit *SI = dyn_cast<StringInit>(PMName)) {
- CI->PredicateMethod = std::string(SI->getValue());
+ CI->PredicateMethod = SI->getValue().str();
} else {
assert(isa<UnsetInit>(PMName) && "Unexpected PredicateMethod field!");
CI->PredicateMethod = "is" + CI->ClassName;
@@ -1429,7 +1431,7 @@ void AsmMatcherInfo::buildOperandClasses() {
// Get or construct the render method name.
const Init *RMName = Rec->getValueInit("RenderMethod");
if (const StringInit *SI = dyn_cast<StringInit>(RMName)) {
- CI->RenderMethod = std::string(SI->getValue());
+ CI->RenderMethod = SI->getValue().str();
} else {
assert(isa<UnsetInit>(RMName) && "Unexpected RenderMethod field!");
CI->RenderMethod = "add" + CI->ClassName + "Operands";
@@ -1438,15 +1440,15 @@ void AsmMatcherInfo::buildOperandClasses() {
// Get the parse method name or leave it as empty.
const Init *PRMName = Rec->getValueInit("ParserMethod");
if (const StringInit *SI = dyn_cast<StringInit>(PRMName))
- CI->ParserMethod = std::string(SI->getValue());
+ CI->ParserMethod = SI->getValue().str();
// Get the diagnostic type and string or leave them as empty.
const Init *DiagnosticType = Rec->getValueInit("DiagnosticType");
if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
- CI->DiagnosticType = std::string(SI->getValue());
+ CI->DiagnosticType = SI->getValue().str();
const Init *DiagnosticString = Rec->getValueInit("DiagnosticString");
if (const StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
- CI->DiagnosticString = std::string(SI->getValue());
+ CI->DiagnosticString = SI->getValue().str();
// If we have a DiagnosticString, we need a DiagnosticType for use within
// the matcher.
if (!CI->DiagnosticString.empty() && CI->DiagnosticType.empty())
@@ -1459,7 +1461,7 @@ void AsmMatcherInfo::buildOperandClasses() {
// Get or construct the default method name.
const Init *DMName = Rec->getValueInit("DefaultMethod");
if (const StringInit *SI = dyn_cast<StringInit>(DMName)) {
- CI->DefaultMethod = std::string(SI->getValue());
+ CI->DefaultMethod = SI->getValue().str();
} else {
assert(isa<UnsetInit>(DMName) && "Unexpected DefaultMethod field!");
CI->DefaultMethod = "default" + CI->ClassName + "Operands";
@@ -1663,13 +1665,14 @@ void AsmMatcherInfo::buildInfo() {
// Add the alias to the matchables list.
NewMatchables.push_back(std::move(AliasII));
}
- } else
+ } else {
// FIXME: The tied operands checking is not yet integrated with the
// framework for reporting multiple near misses. To prevent invalid
// formats from being matched with an alias if a tied-operands check
// would otherwise have disallowed it, we just disallow such constructs
// in TableGen completely.
II->buildAliasResultOperands(!ReportMultipleNearMisses);
+ }
}
if (!NewMatchables.empty())
Matchables.insert(Matchables.end(),
@@ -2303,9 +2306,10 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
<< utostr(std::get<2>(KV.first)) << " },\n";
}
OS << "};\n\n";
- } else
+ } else {
OS << "static const uint8_t TiedAsmOperandTable[][3] = "
"{ /* empty */ {0, 0, 0} };\n\n";
+ }
OS << "namespace {\n";
@@ -2503,8 +2507,9 @@ static void emitValidateOperandClass(const CodeGenTarget &Target,
OS << " return " << Info.Target.getName() << "AsmParser::Match_"
<< CI.DiagnosticType << ";\n";
OS << " break;\n";
- } else
+ } else {
OS << " break;\n";
+ }
OS << " }\n";
}
OS << " } // end switch (Kind)\n\n";
@@ -3052,7 +3057,7 @@ static void emitAsmTiedOperandConstraints(CodeGenTarget &Target,
AsmMatcherInfo &Info, raw_ostream &OS,
bool HasOptionalOperands) {
std::string AsmParserName =
- std::string(Info.AsmParser->getValueAsString("AsmParserClassName"));
+ Info.AsmParser->getValueAsString("AsmParserClassName").str();
OS << "static bool ";
OS << "checkAsmTiedOperandConstraints(const " << Target.getName()
<< AsmParserName << "&AsmParser,\n";
diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
index d0ec4fc8..3ecbd88 100644
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -192,7 +192,7 @@ void AsmWriterEmitter::FindUniqueOperandCommands(
InstIdxs[idx].push_back(i);
} else {
UniqueOperandCommands.push_back(std::move(Command));
- InstrsForCase.push_back(std::string(Inst.CGI->TheDef->getName()));
+ InstrsForCase.push_back(Inst.CGI->TheDef->getName().str());
InstIdxs.emplace_back();
InstIdxs.back().push_back(i);
@@ -592,9 +592,9 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
// "NoRegAltName" is special. We don't need to do a lookup for that,
// as it's just a reference to the default register name.
if (AltName == "" || AltName == "NoRegAltName") {
- AsmName = std::string(Reg.TheDef->getValueAsString("AsmName"));
+ AsmName = Reg.TheDef->getValueAsString("AsmName").str();
if (AsmName.empty())
- AsmName = std::string(Reg.getName());
+ AsmName = Reg.getName().str();
} else {
// Make sure the register has an alternate name for this index.
std::vector<const Record *> AltNameList =
@@ -612,7 +612,7 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
PrintFatalError(Reg.TheDef->getLoc(),
"Register definition missing alt name for '" +
AltName + "'.");
- AsmName = std::string(AltNames[Idx]);
+ AsmName = AltNames[Idx].str();
}
}
StringTable.add(AsmName);
@@ -660,8 +660,9 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
if (hasAltNames) {
for (const Record *R : AltNameIndices)
emitRegisterNameString(O, R->getName(), Registers);
- } else
+ } else {
emitRegisterNameString(O, "", Registers);
+ }
if (hasAltNames) {
O << " switch(AltIdx) {\n"
@@ -939,7 +940,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
}) -
PrintMethods.begin();
if (static_cast<unsigned>(PrintMethodIdx) == PrintMethods.size())
- PrintMethods.emplace_back(std::string(PrintMethod), IsPCRel);
+ PrintMethods.emplace_back(PrintMethod.str(), IsPCRel);
}
}
@@ -969,8 +970,9 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
if (!Rec->isValueUnset("MCOperandPredicate")) {
MCOpPredicates.push_back(Rec);
Entry = MCOpPredicates.size();
- } else
+ } else {
break; // No conditions on this operand at all
+ }
}
IAP.addCond(
std::string(formatv("AliasPatternCond::K_Custom, {}", Entry)));
diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
index 24dc327..bc42efa 100644
--- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
+++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
@@ -444,15 +444,16 @@ void CodeGenIntrinsic::setProperty(const Record *R) {
int64_t Lower = R->getValueAsInt("Lower");
int64_t Upper = R->getValueAsInt("Upper");
addArgAttribute(ArgNo, Range, Lower, Upper);
- } else
+ } else {
llvm_unreachable("Unknown property!");
+ }
}
bool CodeGenIntrinsic::isParamAPointer(unsigned ParamIdx) const {
if (ParamIdx >= IS.ParamTys.size())
return false;
- return (IS.ParamTys[ParamIdx]->isSubClassOf("LLVMQualPointerType") ||
- IS.ParamTys[ParamIdx]->isSubClassOf("LLVMAnyPointerType"));
+ return IS.ParamTys[ParamIdx]->isSubClassOf("LLVMQualPointerType") ||
+ IS.ParamTys[ParamIdx]->isSubClassOf("LLVMAnyPointerType");
}
bool CodeGenIntrinsic::isParamImmArg(unsigned ParamIdx) const {
@@ -461,8 +462,7 @@ bool CodeGenIntrinsic::isParamImmArg(unsigned ParamIdx) const {
if (ParamIdx >= ArgumentAttributes.size())
return false;
ArgAttribute Val{ImmArg, 0, 0};
- return std::binary_search(ArgumentAttributes[ParamIdx].begin(),
- ArgumentAttributes[ParamIdx].end(), Val);
+ return llvm::binary_search(ArgumentAttributes[ParamIdx], Val);
}
void CodeGenIntrinsic::addArgAttribute(unsigned Idx, ArgAttrKind AK, uint64_t V,
diff --git a/llvm/utils/TableGen/Basic/VTEmitter.cpp b/llvm/utils/TableGen/Basic/VTEmitter.cpp
index 07840d3..040f37c 100644
--- a/llvm/utils/TableGen/Basic/VTEmitter.cpp
+++ b/llvm/utils/TableGen/Basic/VTEmitter.cpp
@@ -79,8 +79,9 @@ static void vTtoGetLlvmTyString(raw_ostream &OS, const Record *VT) {
OS << "Type::getInt" << OutputVTSize << "Ty(Context)";
else
OS << "Type::getIntNTy(Context, " << OutputVTSize << ")";
- } else
+ } else {
llvm_unreachable("Unhandled case");
+ }
if (IsVector)
OS << ", " << VT->getValueAsInt("nElem") << ")";
diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp
index 475699a..83c0330 100644
--- a/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -309,8 +309,7 @@ CodeEmitterGen::getInstructionCases(const Record *R,
" case " + itostr(DefaultMode) + ": InstBitsByHw = InstBits";
} else {
Case += " case " + itostr(ModeId) +
- ": InstBitsByHw = InstBits_" +
- std::string(HWM.getMode(ModeId).Name);
+ ": InstBitsByHw = InstBits_" + HWM.getMode(ModeId).Name.str();
}
Case += "; break;\n";
}
@@ -362,7 +361,7 @@ void CodeEmitterGen::addInstructionCasesForEncoding(
if (RV.isNonconcreteOK() || RV.getValue()->isComplete())
continue;
- Success &= addCodeToMergeInOperand(R, BI, std::string(RV.getName()), Case,
+ Success &= addCodeToMergeInOperand(R, BI, RV.getName().str(), Case,
BitOffsetCase, Target);
}
// Avoid empty switches.
diff --git a/llvm/utils/TableGen/CodeGenMapTable.cpp b/llvm/utils/TableGen/CodeGenMapTable.cpp
index 2641e71..bce7278 100644
--- a/llvm/utils/TableGen/CodeGenMapTable.cpp
+++ b/llvm/utils/TableGen/CodeGenMapTable.cpp
@@ -103,7 +103,7 @@ private:
public:
InstrMap(const Record *MapRec) {
- Name = std::string(MapRec->getName());
+ Name = MapRec->getName().str();
// FilterClass - It's used to reduce the search space only to the
// instructions that define the kind of relationship modeled by
@@ -133,8 +133,8 @@ public:
// Each instruction map must specify at least one column for it to be valid.
if (ColValList->empty())
- PrintFatalError(MapRec->getLoc(), "InstrMapping record `" +
- MapRec->getName() + "' has empty " +
+ PrintFatalError(MapRec->getLoc(), "InstrMapping record `" + Name +
+ "' has empty " +
"`ValueCols' field!");
for (const Init *I : ColValList->getValues()) {
@@ -144,7 +144,7 @@ public:
// elements as the fields in 'ColFields'.
if (ColI->size() != ColFields->size())
PrintFatalError(MapRec->getLoc(),
- "Record `" + MapRec->getName() +
+ "Record `" + Name +
"', field `ValueCols' entries don't match with " +
" the entries in 'ColFields'!");
ValueCols.push_back(ColI);
diff --git a/llvm/utils/TableGen/Common/AsmWriterInst.h b/llvm/utils/TableGen/Common/AsmWriterInst.h
index 7c21eb6..26745a8 100644
--- a/llvm/utils/TableGen/Common/AsmWriterInst.h
+++ b/llvm/utils/TableGen/Common/AsmWriterInst.h
@@ -38,7 +38,7 @@ struct AsmWriterOperand {
unsigned MIOpNo = 0;
/// Str - For isLiteralTextOperand, this IS the literal text. For
- /// isMachineInstrOperand, this is the PrinterMethodName for the operand..
+ /// isMachineInstrOperand, this is the PrinterMethodName for the operand.
/// For isLiteralStatementOperand, this is the code to insert verbatim
/// into the asm writer.
std::string Str;
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
index febcb1f..b6bc528 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
@@ -1149,7 +1149,7 @@ std::string TreePredicateFn::getPredCode() const {
Code += "if (!N->hasNUsesOfValue(1, 0)) return false;\n";
std::string PredicateCode =
- std::string(PatFragRec->getRecord()->getValueAsString("PredicateCode"));
+ PatFragRec->getRecord()->getValueAsString("PredicateCode").str();
Code += PredicateCode;
@@ -1164,8 +1164,7 @@ bool TreePredicateFn::hasImmCode() const {
}
std::string TreePredicateFn::getImmCode() const {
- return std::string(
- PatFragRec->getRecord()->getValueAsString("ImmediateCode"));
+ return PatFragRec->getRecord()->getValueAsString("ImmediateCode").str();
}
bool TreePredicateFn::immCodeUsesAPInt() const {
@@ -1286,11 +1285,13 @@ const Record *TreePredicateFn::getScalarMemoryVT() const {
return nullptr;
return R->getValueAsDef("ScalarMemoryVT");
}
+
bool TreePredicateFn::hasGISelPredicateCode() const {
return !PatFragRec->getRecord()
->getValueAsString("GISelPredicateCode")
.empty();
}
+
std::string TreePredicateFn::getGISelPredicateCode() const {
return std::string(
PatFragRec->getRecord()->getValueAsString("GISelPredicateCode"));
@@ -2405,8 +2406,9 @@ TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
if (!DI)
return nullptr;
Rec = DI->getDef();
- } else
+ } else {
Rec = getOperator();
+ }
if (!Rec->isSubClassOf("ComplexPattern"))
return nullptr;
@@ -2915,7 +2917,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(const Init *TheInit,
if (R->getName() == "node" && !OpName.empty()) {
if (OpName.empty())
error("'node' argument requires a name to match with operand list");
- Args.push_back(std::string(OpName));
+ Args.push_back(OpName.str());
}
Res->setName(OpName);
@@ -2927,7 +2929,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(const Init *TheInit,
if (OpName.empty())
error("'?' argument requires a name to match with operand list");
TreePatternNodePtr Res = makeIntrusiveRefCnt<TreePatternNode>(TheInit, 1);
- Args.push_back(std::string(OpName));
+ Args.push_back(OpName.str());
Res->setName(OpName);
return Res;
}
@@ -3167,7 +3169,7 @@ bool TreePattern::InferAllTypes(
if (InNamedTypes) {
auto InIter = InNamedTypes->find(Entry.getKey());
if (InIter == InNamedTypes->end()) {
- error("Node '" + std::string(Entry.getKey()) +
+ error("Node '" + Entry.getKey().str() +
"' in output pattern but not input pattern");
return true;
}
@@ -3299,7 +3301,7 @@ void CodeGenDAGPatterns::ParseNodeTransforms() {
reverse(Records.getAllDerivedDefinitions("SDNodeXForm"))) {
const Record *SDNode = XFormNode->getValueAsDef("Opcode");
StringRef Code = XFormNode->getValueAsString("XFormFunction");
- SDNodeXForms.insert({XFormNode, NodeXForm(SDNode, std::string(Code))});
+ SDNodeXForms.insert({XFormNode, NodeXForm(SDNode, Code.str())});
}
}
@@ -3358,7 +3360,7 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
if (!OperandsSet.erase(ArgNameStr))
P->error("'" + ArgNameStr +
"' does not occur in pattern or was multiply specified!");
- Args.push_back(std::string(ArgNameStr));
+ Args.push_back(ArgNameStr.str());
}
if (!OperandsSet.empty())
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
index 328700c..364c82e 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
@@ -406,7 +406,7 @@ class ScopedName {
public:
ScopedName(unsigned Scope, StringRef Identifier)
- : Scope(Scope), Identifier(std::string(Identifier)) {
+ : Scope(Scope), Identifier(Identifier.str()) {
assert(Scope != 0 &&
"Scope == 0 is used to indicate predicates without arguments");
}
diff --git a/llvm/utils/TableGen/Common/CodeGenHwModes.cpp b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
index 9996b5a..0976511 100644
--- a/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
@@ -20,7 +20,7 @@ StringRef CodeGenHwModes::DefaultModeName = "DefaultMode";
HwMode::HwMode(const Record *R) {
Name = R->getName();
- Features = std::string(R->getValueAsString("Features"));
+ Features = R->getValueAsString("Features").str();
SmallString<128> PredicateCheck;
raw_svector_ostream OS(PredicateCheck);
diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
index 94c0d51..f33deb0 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
@@ -40,8 +40,7 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
if (!Result->getArgName(AliasOpNo))
PrintFatalError(Loc, "result argument #" + Twine(AliasOpNo) +
" must have a name!");
- ResOp = ResultOperand(std::string(Result->getArgNameStr(AliasOpNo)),
- ResultRecord);
+ ResOp = ResultOperand(Result->getArgNameStr(AliasOpNo).str(), ResultRecord);
return true;
}
@@ -59,8 +58,7 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
if (!T.getRegisterClass(InstOpRec).hasSubClass(
&T.getRegisterClass(ADI->getDef())))
return false;
- ResOp = ResultOperand(std::string(Result->getArgNameStr(AliasOpNo)),
- ResultRecord);
+ ResOp = ResultOperand(Result->getArgNameStr(AliasOpNo).str(), ResultRecord);
return true;
}
@@ -141,8 +139,8 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
// MIOperandInfo perhaps?
if (InstOpRec->getValueInit("Type") != ADI->getDef()->getValueInit("Type"))
return false;
- ResOp = ResultOperand(std::string(Result->getArgNameStr(AliasOpNo)),
- ADI->getDef());
+ ResOp =
+ ResultOperand(Result->getArgNameStr(AliasOpNo).str(), ADI->getDef());
return true;
}
@@ -169,7 +167,7 @@ unsigned CodeGenInstAlias::ResultOperand::getMINumOperands() const {
CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T)
: TheDef(R) {
Result = R->getValueAsDag("ResultInst");
- AsmString = std::string(R->getValueAsString("AsmString"));
+ AsmString = R->getValueAsString("AsmString");
// Verify that the root of the result is an instruction.
const DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
diff --git a/llvm/utils/TableGen/Common/CodeGenInstruction.cpp b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
index ecef9ca..281df230 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
@@ -34,9 +34,10 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
PrintFatalError(R->getLoc(),
R->getName() +
": invalid def name for output list: use 'outs'");
- } else
+ } else {
PrintFatalError(R->getLoc(),
R->getName() + ": invalid output list: use 'outs'");
+ }
NumDefs = OutDI->getNumArgs();
@@ -46,9 +47,10 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
PrintFatalError(R->getLoc(),
R->getName() +
": invalid def name for input list: use 'ins'");
- } else
+ } else {
PrintFatalError(R->getLoc(),
R->getName() + ": invalid input list: use 'ins'");
+ }
unsigned MIOperandNo = 0;
std::set<std::string> OperandNames;
@@ -83,16 +85,16 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
unsigned NumOps = 1;
const DagInit *MIOpInfo = nullptr;
if (Rec->isSubClassOf("RegisterOperand")) {
- PrintMethod = std::string(Rec->getValueAsString("PrintMethod"));
- OperandType = std::string(Rec->getValueAsString("OperandType"));
- OperandNamespace = std::string(Rec->getValueAsString("OperandNamespace"));
- EncoderMethod = std::string(Rec->getValueAsString("EncoderMethod"));
+ PrintMethod = Rec->getValueAsString("PrintMethod").str();
+ OperandType = Rec->getValueAsString("OperandType").str();
+ OperandNamespace = Rec->getValueAsString("OperandNamespace").str();
+ EncoderMethod = Rec->getValueAsString("EncoderMethod").str();
} else if (Rec->isSubClassOf("Operand")) {
- PrintMethod = std::string(Rec->getValueAsString("PrintMethod"));
- OperandType = std::string(Rec->getValueAsString("OperandType"));
- OperandNamespace = std::string(Rec->getValueAsString("OperandNamespace"));
+ PrintMethod = Rec->getValueAsString("PrintMethod").str();
+ OperandType = Rec->getValueAsString("OperandType").str();
+ OperandNamespace = Rec->getValueAsString("OperandNamespace").str();
// If there is an explicit encoder method, use it.
- EncoderMethod = std::string(Rec->getValueAsString("EncoderMethod"));
+ EncoderMethod = Rec->getValueAsString("EncoderMethod").str();
MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
// Verify that MIOpInfo has an 'ops' root value.
@@ -130,14 +132,14 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
PrintFatalError(R->getLoc(), "In instruction '" + R->getName() +
"', operand #" + Twine(i) +
" has no name!");
- if (!OperandNames.insert(std::string(ArgName)).second)
+ if (!OperandNames.insert(ArgName.str()).second)
PrintFatalError(R->getLoc(),
"In instruction '" + R->getName() + "', operand #" +
Twine(i) +
" has the same name as a previous operand!");
OperandInfo &OpInfo = OperandList.emplace_back(
- Rec, std::string(ArgName), std::string(std::move(PrintMethod)),
+ Rec, ArgName.str(), std::string(std::move(PrintMethod)),
OperandNamespace + "::" + OperandType, MIOperandNo, NumOps, MIOpInfo);
if (SubArgDag) {
@@ -161,7 +163,7 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) {
PrintFatalError(R->getLoc(), "In instruction '" + R->getName() +
"', operand #" + Twine(i) +
" has no name!");
- if (!OperandNames.insert(std::string(SubArgName)).second)
+ if (!OperandNames.insert(SubArgName.str()).second)
PrintFatalError(R->getLoc(),
"In instruction '" + R->getName() + "', operand #" +
Twine(i) + " sub-arg #" + Twine(j) +
@@ -433,7 +435,7 @@ void CGIOperandList::ProcessDisableEncoding(StringRef DisableEncoding) {
CodeGenInstruction::CodeGenInstruction(const Record *R)
: TheDef(R), Operands(R), InferredFrom(nullptr) {
Namespace = R->getValueAsString("Namespace");
- AsmString = std::string(R->getValueAsString("AsmString"));
+ AsmString = R->getValueAsString("AsmString").str();
isPreISelOpcode = R->getValueAsBit("isPreISelOpcode");
isReturn = R->getValueAsBit("isReturn");
@@ -501,8 +503,7 @@ CodeGenInstruction::CodeGenInstruction(const Record *R)
// First check for a ComplexDeprecationPredicate.
if (R->getValue("ComplexDeprecationPredicate")) {
HasComplexDeprecationPredicate = true;
- DeprecatedReason =
- std::string(R->getValueAsString("ComplexDeprecationPredicate"));
+ DeprecatedReason = R->getValueAsString("ComplexDeprecationPredicate").str();
} else if (const RecordVal *Dep = R->getValue("DeprecatedFeatureMask")) {
// Check if we have a Subtarget feature mask.
HasComplexDeprecationPredicate = false;
diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
index 8e8b319..8cd8fb6 100644
--- a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
@@ -121,7 +121,7 @@ struct InstRegexOp : public SetTheory::Operator {
StringRef PatStr = Original.substr(FirstMeta);
if (!PatStr.empty()) {
// For the rest use a python-style prefix match.
- std::string pat = std::string(PatStr);
+ std::string pat = PatStr.str();
// Add ^ anchor. If we had one originally, don't need the group.
if (HadAnchor) {
pat.insert(0, "^");
@@ -544,7 +544,7 @@ void CodeGenSchedModels::addProcModel(const Record *ProcDef) {
if (!ProcModelMap.try_emplace(ModelKey, ProcModels.size()).second)
return;
- std::string Name = std::string(ModelKey->getName());
+ std::string Name = ModelKey->getName().str();
if (ModelKey->isSubClassOf("SchedMachineModel")) {
const Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
@@ -938,7 +938,7 @@ CodeGenSchedModels::createSchedClassName(const Record *ItinClassDef,
ArrayRef<unsigned> OperReads) {
std::string Name;
if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
- Name = std::string(ItinClassDef->getName());
+ Name = ItinClassDef->getName().str();
for (unsigned Idx : OperWrites) {
if (!Name.empty())
Name += '_';
@@ -2016,8 +2016,9 @@ void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
if (Alias->getValueInit("SchedModel")->isComplete()) {
AliasProcIndices.push_back(
getProcModel(Alias->getValueAsDef("SchedModel")).Index);
- } else
+ } else {
AliasProcIndices = ProcIndices;
+ }
const CodeGenSchedRW &AliasRW = getSchedRW(Alias->getValueAsDef("AliasRW"));
assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.h b/llvm/utils/TableGen/Common/CodeGenSchedule.h
index 8343257..697a1ce 100644
--- a/llvm/utils/TableGen/Common/CodeGenSchedule.h
+++ b/llvm/utils/TableGen/Common/CodeGenSchedule.h
@@ -64,7 +64,7 @@ struct CodeGenSchedRW {
HasVariants(false), IsVariadic(false), IsSequence(false) {}
CodeGenSchedRW(unsigned Idx, const Record *Def)
: Index(Idx), TheDef(Def), IsAlias(false), IsVariadic(false) {
- Name = std::string(Def->getName());
+ Name = Def->getName().str();
IsRead = Def->isSubClassOf("SchedRead");
HasVariants = Def->isSubClassOf("SchedVariant");
if (HasVariants)
diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.cpp b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
index e8286d2..3169019 100644
--- a/llvm/utils/TableGen/Common/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
@@ -394,7 +394,7 @@ bool CodeGenTarget::guessInstructionProperties() const {
ComplexPattern::ComplexPattern(const Record *R) {
Ty = R->getValueAsDef("Ty");
NumOperands = R->getValueAsInt("NumOperands");
- SelectFunc = std::string(R->getValueAsString("SelectFunc"));
+ SelectFunc = R->getValueAsString("SelectFunc").str();
RootNodes = R->getValueAsListOfDefs("RootNodes");
// FIXME: This is a hack to statically increase the priority of patterns which
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index 4c809b4..2cb3579 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -2015,9 +2015,10 @@ void TempRegRenderer::emitRenderOpcodes(MatchTable &Table,
if (SubRegIdx) {
assert(!IsDef);
Table << MatchTable::Opcode("GIR_AddTempSubRegister");
- } else
+ } else {
Table << MatchTable::Opcode(NeedsFlags ? "GIR_AddTempRegister"
: "GIR_AddSimpleTempRegister");
+ }
Table << MatchTable::Comment("InsnID") << MatchTable::ULEB128Value(InsnID)
<< MatchTable::Comment("TempRegID")
@@ -2035,8 +2036,9 @@ void TempRegRenderer::emitRenderOpcodes(MatchTable &Table,
if (IsDead)
RegFlags += "|RegState::Dead";
Table << MatchTable::NamedValue(2, RegFlags);
- } else
+ } else {
Table << MatchTable::IntValue(2, 0);
+ }
if (SubRegIdx)
Table << MatchTable::NamedValue(2, SubRegIdx->getQualifiedName());
@@ -2064,8 +2066,9 @@ void ImmRenderer::emitRenderOpcodes(MatchTable &Table,
<< MatchTable::ULEB128Value(InsnID) << MatchTable::Comment("Type")
<< *CImmLLT << MatchTable::Comment("Imm")
<< MatchTable::IntValue(8, Imm) << MatchTable::LineBreak;
- } else
+ } else {
emitAddImm(Table, Rule, InsnID, Imm);
+ }
}
//===- SubRegIndexRenderer ------------------------------------------------===//
@@ -2156,8 +2159,9 @@ bool BuildMIAction::canMutate(RuleMatcher &Rule,
if (Insn != &OM.getInstructionMatcher() ||
OM.getOpIdx() != Renderer.index())
return false;
- } else
+ } else {
return false;
+ }
}
return true;
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
index 77c8bc2..9f17882 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
@@ -1285,7 +1285,7 @@ public:
StringRef getSymbolicName() const { return SymbolicName; }
void setSymbolicName(StringRef Name) {
assert(SymbolicName.empty() && "Operand already has a symbolic name");
- SymbolicName = std::string(Name);
+ SymbolicName = Name.str();
}
/// Construct a new operand predicate and add it to the matcher.
@@ -2321,8 +2321,7 @@ private:
std::string S;
public:
- DebugCommentAction(StringRef S)
- : MatchAction(AK_DebugComment), S(std::string(S)) {}
+ DebugCommentAction(StringRef S) : MatchAction(AK_DebugComment), S(S.str()) {}
static bool classof(const MatchAction *A) {
return A->getKind() == AK_DebugComment;
diff --git a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
index cb423ce..e62f63a 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
@@ -134,8 +134,9 @@ PatternParser::parseInstructionPattern(const Init &Arg, StringRef Name) {
getDagWithOperatorOfSubClass(Arg, BuiltinPattern::ClassName)) {
Pat = std::make_unique<BuiltinPattern>(*BP->getOperatorAsDef(DiagLoc),
insertStrRef(Name));
- } else
+ } else {
return nullptr;
+ }
for (unsigned K = 0; K < DagPat->getNumArgs(); ++K) {
const Init *Arg = DagPat->getArg(K);
diff --git a/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp b/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
index 0b84a9b..9817618 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
@@ -840,8 +840,9 @@ bool PatFragPattern::mapInputCodeExpansions(const CodeExpansions &ParentCEs,
if (It == ParentCEs.end()) {
if (!PF.handleUnboundInParam(ParamName, ArgName, DiagLoc))
return false;
- } else
+ } else {
PatFragCEs.declare(ParamName, It->second);
+ }
continue;
}
diff --git a/llvm/utils/TableGen/CompressInstEmitter.cpp b/llvm/utils/TableGen/CompressInstEmitter.cpp
index ea617b2..b981d38 100644
--- a/llvm/utils/TableGen/CompressInstEmitter.cpp
+++ b/llvm/utils/TableGen/CompressInstEmitter.cpp
@@ -278,8 +278,9 @@ void CompressInstEmitter::addDagOperandMapping(const Record *Rec,
<< (IsSourceInst ? "input " : "output ")
<< "Dag. No validation time check possible for values of "
"fixed immediate.\n");
- } else
+ } else {
llvm_unreachable("Unhandled CompressPat argument type!");
+ }
}
}
}
diff --git a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index 8b0f48a..13ab216 100644
--- a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -439,8 +439,9 @@ unsigned MatcherTableEmitter::EmitMatcher(const Matcher *N,
if (!OmitComments) {
OS << "/*" << format_decimal(CurrentIdx, IndexWidth) << "*/";
OS.indent(Indent) << "/*Scope*/ ";
- } else
+ } else {
OS.indent(Indent);
+ }
}
unsigned ChildSize = SM->getChild(i)->getSize();
@@ -558,8 +559,9 @@ unsigned MatcherTableEmitter::EmitMatcher(const Matcher *N,
if (PredNo < 8) {
OperandBytes = -1;
OS << "OPC_CheckPredicate" << PredNo << ", ";
- } else
+ } else {
OS << "OPC_CheckPredicate, ";
+ }
}
if (PredNo >= 8 || Pred.usesOperands())
@@ -916,9 +918,10 @@ unsigned MatcherTableEmitter::EmitMatcher(const Matcher *N,
OS << "OPC_EmitCopyToReg" << Slot << ", "
<< getQualifiedName(Reg->TheDef) << ",\n";
--Bytes;
- } else
+ } else {
OS << "OPC_EmitCopyToReg, " << Slot << ", "
<< getQualifiedName(Reg->TheDef) << ",\n";
+ }
}
return Bytes;
@@ -1042,8 +1045,9 @@ unsigned MatcherTableEmitter::EmitMatcher(const Matcher *N,
OS.indent(FullIndexWidth + Indent)
<< "// Dst: " << SNT->getPattern().getDstPattern() << '\n';
}
- } else
+ } else {
OS << '\n';
+ }
return 4 + !CompressVTs + !CompressNodeInfo + NumTypeBytes +
NumOperandBytes + NumCoveredBytes;
@@ -1237,8 +1241,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(raw_ostream &OS) {
OS << "// " << NodeXForms[i]->getName();
OS << '\n';
- std::string ClassName =
- std::string(CGP.getSDNodeInfo(SDNode).getSDClassName());
+ std::string ClassName = CGP.getSDNodeInfo(SDNode).getSDClassName().str();
if (ClassName == "SDNode")
OS << " SDNode *N = V.getNode();\n";
else
diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 55213b7..aec9a87 100644
--- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -306,7 +306,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode &N,
// The "name" of a non-leaf complex pattern (MY_PAT $op1, $op2) is
// "MY_PAT:op1:op2". We should already have validated that the uses are
// consistent.
- std::string PatternName = std::string(N.getOperator()->getName());
+ std::string PatternName = N.getOperator()->getName().str();
for (const TreePatternNode &Child : N.children()) {
PatternName += ":";
PatternName += Child.getName();
diff --git a/llvm/utils/TableGen/DFAEmitter.cpp b/llvm/utils/TableGen/DFAEmitter.cpp
index eb0b0b4..0b90af2 100644
--- a/llvm/utils/TableGen/DFAEmitter.cpp
+++ b/llvm/utils/TableGen/DFAEmitter.cpp
@@ -257,7 +257,7 @@ void Automaton::emit(raw_ostream &OS) {
StringRef Name = R->getName();
- CustomDfaEmitter Emitter(Actions, std::string(Name) + "Action");
+ CustomDfaEmitter Emitter(Actions, Name.str() + "Action");
// Starting from the initial state, build up a list of possible states and
// transitions.
std::deque<uint64_t> Worklist(1, 0);
@@ -322,7 +322,7 @@ Transition::Transition(const Record *R, Automaton *Parent) {
Actions.emplace_back(static_cast<unsigned>(R->getValueAsInt(A)));
Types.emplace_back("unsigned");
} else if (isa<StringRecTy>(SymbolV->getType())) {
- Actions.emplace_back(std::string(R->getValueAsString(A)));
+ Actions.emplace_back(R->getValueAsString(A).str());
Types.emplace_back("std::string");
} else {
report_fatal_error("Unhandled symbol type!");
@@ -330,7 +330,7 @@ Transition::Transition(const Record *R, Automaton *Parent) {
StringRef TypeOverride = Parent->getActionSymbolType(A);
if (!TypeOverride.empty())
- Types.back() = std::string(TypeOverride);
+ Types.back() = TypeOverride.str();
}
}
diff --git a/llvm/utils/TableGen/DFAPacketizerEmitter.cpp b/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
index a6c0d09..8cb2c22 100644
--- a/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
+++ b/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -92,7 +92,7 @@ public:
} // end anonymous namespace
DFAPacketizerEmitter::DFAPacketizerEmitter(const RecordKeeper &R)
- : TargetName(std::string(CodeGenTarget(R).getName())), Records(R) {}
+ : TargetName(CodeGenTarget(R).getName().str()), Records(R) {}
int DFAPacketizerEmitter::collectAllFuncUnits(
ArrayRef<const CodeGenProcModel *> ProcModels) {
@@ -119,7 +119,7 @@ int DFAPacketizerEmitter::collectAllFuncUnits(
assert((j < DFA_MAX_RESOURCES) &&
"Exceeded maximum number of representable resources");
uint64_t FuncResources = 1ULL << j;
- FUNameToBitsMap[std::string(FUs[j]->getName())] = FuncResources;
+ FUNameToBitsMap[FUs[j]->getName().str()] = FuncResources;
LLVM_DEBUG(dbgs() << " " << FUs[j]->getName() << ":0x"
<< Twine::utohexstr(FuncResources));
}
@@ -152,13 +152,13 @@ int DFAPacketizerEmitter::collectAllComboFuncs(
const Record *ComboFunc = FuncData->getValueAsDef("TheComboFunc");
const std::vector<const Record *> FuncList =
FuncData->getValueAsListOfDefs("FuncList");
- const std::string &ComboFuncName = std::string(ComboFunc->getName());
+ const std::string &ComboFuncName = ComboFunc->getName().str();
uint64_t ComboBit = FUNameToBitsMap[ComboFuncName];
uint64_t ComboResources = ComboBit;
LLVM_DEBUG(dbgs() << " combo: " << ComboFuncName << ":0x"
<< Twine::utohexstr(ComboResources) << "\n");
for (const Record *K : FuncList) {
- std::string FuncName = std::string(K->getName());
+ std::string FuncName = K->getName().str();
uint64_t FuncResources = FUNameToBitsMap[FuncName];
LLVM_DEBUG(dbgs() << " " << FuncName << ":0x"
<< Twine::utohexstr(FuncResources) << "\n");
@@ -181,7 +181,7 @@ DFAPacketizerEmitter::getResourcesForItinerary(const Record *Itinerary) {
for (const Record *StageDef : Itinerary->getValueAsListOfDefs("Stages")) {
uint64_t StageResources = 0;
for (const Record *Unit : StageDef->getValueAsListOfDefs("Units")) {
- StageResources |= FUNameToBitsMap[std::string(Unit->getName())];
+ StageResources |= FUNameToBitsMap[Unit->getName().str()];
}
if (StageResources != 0)
Resources.push_back(StageResources);
@@ -220,7 +220,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
for (const CodeGenProcModel &ProcModel : CGS.procModels()) {
if (ProcModel.hasItineraries()) {
auto NS = ProcModel.ItinsDef->getValueAsString("PacketizerNamespace");
- ItinsByNamespace[std::string(NS)].push_back(&ProcModel);
+ ItinsByNamespace[NS.str()].push_back(&ProcModel);
}
}
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index eb3b300..4536d3d 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -1791,7 +1791,7 @@ static std::string findOperandDecoderMethod(const Record *Record) {
const StringInit *String =
DecoderString ? dyn_cast<StringInit>(DecoderString->getValue()) : nullptr;
if (String) {
- Decoder = std::string(String->getValue());
+ Decoder = String->getValue().str();
if (!Decoder.empty())
return Decoder;
}
@@ -1917,7 +1917,7 @@ static void addOneOperandFields(const Record &EncodingDef, const BitsInit &Bits,
Var = dyn_cast<VarInit>(Bits.getBit(J));
}
if (!Var || (Var->getName() != OpName &&
- Var->getName() != TiedNames[std::string(OpName)]))
+ Var->getName() != TiedNames[OpName.str()]))
break;
}
if (I == J)
@@ -2354,7 +2354,7 @@ static void collectHwModesReferencedForEncodings(
for (const HwModeSelect::PairType &P : MS.second.Items) {
if (P.second->isSubClassOf("InstructionEncoding")) {
std::string DecoderNamespace =
- std::string(P.second->getValueAsString("DecoderNamespace"));
+ P.second->getValueAsString("DecoderNamespace").str();
if (P.first == DefaultMode) {
NamespacesWithHwModes[DecoderNamespace].insert("");
} else {
@@ -2387,7 +2387,7 @@ handleHwModesUnrelatedEncodings(const CodeGenInstruction *Instr,
}
case SUPPRESSION_LEVEL1: {
std::string DecoderNamespace =
- std::string(InstDef->getValueAsString("DecoderNamespace"));
+ InstDef->getValueAsString("DecoderNamespace").str();
auto It = NamespacesWithHwModes.find(DecoderNamespace);
if (It != NamespacesWithHwModes.end()) {
for (StringRef HwModeName : It->second)
@@ -2506,10 +2506,9 @@ namespace {
InstrLen[NEI] = Len;
}
std::string DecoderNamespace =
- std::string(EncodingDef->getValueAsString("DecoderNamespace"));
+ EncodingDef->getValueAsString("DecoderNamespace").str();
if (!NumberedEncoding.HwModeName.empty())
- DecoderNamespace +=
- std::string("_") + NumberedEncoding.HwModeName.str();
+ DecoderNamespace += "_" + NumberedEncoding.HwModeName.str();
OpcMap[{DecoderNamespace, Size}].emplace_back(
NEI, Target.getInstrIntValue(Def));
} else {
diff --git a/llvm/utils/TableGen/ExegesisEmitter.cpp b/llvm/utils/TableGen/ExegesisEmitter.cpp
index a5dd299..1b4b072 100644
--- a/llvm/utils/TableGen/ExegesisEmitter.cpp
+++ b/llvm/utils/TableGen/ExegesisEmitter.cpp
@@ -103,7 +103,7 @@ ExegesisEmitter::ExegesisEmitter(const RecordKeeper &RK)
PrintFatalError("No 'Target' subclasses defined!");
if (Targets.size() != 1)
PrintFatalError("Multiple subclasses of Target defined!");
- Target = std::string(Targets[0]->getName());
+ Target = Targets[0]->getName().str();
}
struct ValidationCounterInfo {
diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp
index 26625da..9aa6ec1 100644
--- a/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -280,9 +280,9 @@ struct OperandsSignature {
RC = &Target.getRegisterClass(OpLeafRec);
else if (OpLeafRec->isSubClassOf("Register"))
RC = Target.getRegBank().getRegClassForRegister(OpLeafRec);
- else if (OpLeafRec->isSubClassOf("ValueType")) {
+ else if (OpLeafRec->isSubClassOf("ValueType"))
RC = OrigDstRC;
- } else
+ else
return false;
// For now, this needs to be a register class of some sort.
@@ -294,8 +294,9 @@ struct OperandsSignature {
if (DstRC) {
if (DstRC != RC && !DstRC->hasSubClass(RC))
return false;
- } else
+ } else {
DstRC = RC;
+ }
Operands.push_back(OpKind::getReg());
}
return true;
@@ -417,7 +418,7 @@ private:
static std::string getOpcodeName(const Record *Op,
const CodeGenDAGPatterns &CGP) {
- return std::string(CGP.getSDNodeInfo(Op).getEnumName());
+ return CGP.getSDNodeInfo(Op).getEnumName().str();
}
static std::string getLegalCName(std::string OpName) {
@@ -715,20 +716,19 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
const PredMap &PM = RI.second;
OS << "Register fastEmit_" << getLegalCName(Opcode) << "_"
- << getLegalCName(std::string(getEnumName(VT))) << "_"
- << getLegalCName(std::string(getEnumName(RetVT))) << "_";
+ << getLegalCName(getEnumName(VT).str()) << "_"
+ << getLegalCName(getEnumName(RetVT).str()) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(";
Operands.PrintParameters(OS);
OS << ") {\n";
- emitInstructionCode(OS, Operands, PM,
- std::string(getEnumName(RetVT)));
+ emitInstructionCode(OS, Operands, PM, getEnumName(RetVT).str());
}
// Emit one function for the type that demultiplexes on return type.
OS << "Register fastEmit_" << getLegalCName(Opcode) << "_"
- << getLegalCName(std::string(getEnumName(VT))) << "_";
+ << getLegalCName(getEnumName(VT).str()) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT RetVT";
if (!Operands.empty())
@@ -739,8 +739,8 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
MVT::SimpleValueType RetVT = RI.first;
OS << " case " << getEnumName(RetVT) << ": return fastEmit_"
<< getLegalCName(Opcode) << "_"
- << getLegalCName(std::string(getEnumName(VT))) << "_"
- << getLegalCName(std::string(getEnumName(RetVT))) << "_";
+ << getLegalCName(getEnumName(VT).str()) << "_"
+ << getLegalCName(getEnumName(RetVT).str()) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(";
Operands.PrintArguments(OS);
@@ -751,7 +751,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
} else {
// Non-variadic return type.
OS << "Register fastEmit_" << getLegalCName(Opcode) << "_"
- << getLegalCName(std::string(getEnumName(VT))) << "_";
+ << getLegalCName(getEnumName(VT).str()) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT RetVT";
if (!Operands.empty())
@@ -779,7 +779,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
OS << " switch (VT.SimpleTy) {\n";
for (const auto &TI : TM) {
MVT::SimpleValueType VT = TI.first;
- std::string TypeName = std::string(getEnumName(VT));
+ std::string TypeName = getEnumName(VT).str();
OS << " case " << TypeName << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(TypeName) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
diff --git a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
index 4e491f8..36c0cf5 100644
--- a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
@@ -1277,8 +1277,9 @@ bool CombineRuleBuilder::checkSemantics() {
"patterns");
return false;
}
- } else
+ } else {
IsUsingCXXPatterns = isa<CXXPattern>(Pat);
+ }
assert(Pat);
const auto *IP = dyn_cast<InstructionPattern>(Pat);
@@ -1610,8 +1611,9 @@ bool CombineRuleBuilder::emitMatchPattern(CodeExpansions &CE,
return false;
} else if (isa<BuiltinPattern>(&IP)) {
llvm_unreachable("No match builtins known!");
- } else
+ } else {
llvm_unreachable("Unknown kind of InstructionPattern!");
+ }
// Emit remaining patterns
const bool IsUsingCustomCXXAction = hasOnlyCXXApplyPatterns();
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 8d0403a..092dba5 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -756,9 +756,10 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
if (isa<IntInit>(SrcInit)) {
InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
&Target.getInstruction(RK.getDef("G_CONSTANT")));
- } else
+ } else {
return failedImport(
"Unable to deduce gMIR opcode to handle Src (which is a leaf)");
+ }
} else {
SrcGIEquivOrNull = findNodeEquiv(Src.getOperator());
if (!SrcGIEquivOrNull)
@@ -850,9 +851,10 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
OperandMatcher &OM =
InsnMatcher.addOperand(OpIdx++, Src.getName(), TempOpIdx);
OM.addPredicate<LiteralIntOperandMatcher>(SrcIntInit->getValue());
- } else
+ } else {
return failedImport(
"Unable to deduce gMIR opcode to handle Src (which is a leaf)");
+ }
} else {
assert(SrcGIOrNull &&
"Expected to have already found an equivalent Instruction");
@@ -887,7 +889,7 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
: CCDef->getValueAsString("ICmpPredicate");
if (!PredType.empty()) {
- OM.addPredicate<CmpPredicateOperandMatcher>(std::string(PredType));
+ OM.addPredicate<CmpPredicateOperandMatcher>(PredType.str());
// Process the other 2 operands normally.
--NumChildren;
}
@@ -988,12 +990,12 @@ Error GlobalISelEmitter::importChildMatcher(
bool OperandIsImmArg, unsigned OpIdx, unsigned &TempOpIdx) {
const Record *PhysReg = nullptr;
- std::string SrcChildName = std::string(getSrcChildName(SrcChild, PhysReg));
+ std::string SrcChildName = getSrcChildName(SrcChild, PhysReg).str();
if (!SrcChild.isLeaf() &&
SrcChild.getOperator()->isSubClassOf("ComplexPattern")) {
// The "name" of a non-leaf complex pattern (MY_PAT $op1, $op2) is
// "MY_PAT:op1:op2" and the ones with same "name" represent same operand.
- std::string PatternName = std::string(SrcChild.getOperator()->getName());
+ std::string PatternName = SrcChild.getOperator()->getName().str();
for (const TreePatternNode &Child : SrcChild.children()) {
PatternName += ":";
PatternName += Child.getName();
diff --git a/llvm/utils/TableGen/InstrDocsEmitter.cpp b/llvm/utils/TableGen/InstrDocsEmitter.cpp
index 54ca7d8..a8a234675 100644
--- a/llvm/utils/TableGen/InstrDocsEmitter.cpp
+++ b/llvm/utils/TableGen/InstrDocsEmitter.cpp
@@ -67,7 +67,7 @@ static void EmitInstrDocs(const RecordKeeper &RK, raw_ostream &OS) {
unsigned VariantCount = Target.getAsmParserVariantCount();
// Page title.
- std::string Title = std::string(Target.getName());
+ std::string Title = Target.getName().str();
Title += " Instructions";
writeTitle(Title, OS);
OS << "\n";
diff --git a/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp b/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
index ce509c7..0b77586b 100644
--- a/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
+++ b/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
@@ -263,10 +263,11 @@ void MacroFusionPredicatorEmitter::emitBothPredicate(const Record *Predicate,
OS.indent(2) << " return false;";
}
OS << "\n";
- } else
+ } else {
PrintFatalError(Predicate->getLoc(),
"Unsupported predicate for both instruction: " +
Predicate->getType()->getAsString());
+ }
}
void MacroFusionPredicatorEmitter::run(raw_ostream &OS) {
diff --git a/llvm/utils/TableGen/OptionParserEmitter.cpp b/llvm/utils/TableGen/OptionParserEmitter.cpp
index be0ed1e..ba99015 100644
--- a/llvm/utils/TableGen/OptionParserEmitter.cpp
+++ b/llvm/utils/TableGen/OptionParserEmitter.cpp
@@ -24,9 +24,9 @@ using namespace llvm;
static std::string getOptionName(const Record &R) {
// Use the record name unless EnumName is defined.
if (isa<UnsetInit>(R.getValueInit("EnumName")))
- return std::string(R.getName());
+ return R.getName().str();
- return std::string(R.getValueAsString("EnumName"));
+ return R.getValueAsString("EnumName").str();
}
static raw_ostream &writeStrTableOffset(raw_ostream &OS,
@@ -389,8 +389,9 @@ static void emitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
OS << ",\n";
OS << " ";
writeCstring(OS, R.getValueAsString("HelpText"));
- } else
+ } else {
OS << ", nullptr";
+ }
// Not using Visibility specific text for group help.
emitHelpTextsForVariants(OS, {});
@@ -428,8 +429,9 @@ static void emitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
GroupFlags = DI->getDef()->getValueAsListInit("Flags");
GroupVis = DI->getDef()->getValueAsListInit("Visibility");
OS << getOptionName(*DI->getDef());
- } else
+ } else {
OS << "INVALID";
+ }
// The option alias (if any).
OS << ", ";
@@ -490,8 +492,9 @@ static void emitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
OS << ",\n";
OS << " ";
writeCstring(OS, R.getValueAsString("HelpText"));
- } else
+ } else {
OS << ", nullptr";
+ }
std::vector<std::pair<std::vector<std::string>, StringRef>>
HelpTextsForVariants;
@@ -522,8 +525,9 @@ static void emitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
writeCstring(OS, R.getValueAsString("Values"));
else if (!isa<UnsetInit>(R.getValueInit("ValuesCode"))) {
OS << getOptionName(R) << "_Values";
- } else
+ } else {
OS << "nullptr";
+ }
};
auto IsMarshallingOption = [](const Record &R) {
diff --git a/llvm/utils/TableGen/PseudoLoweringEmitter.cpp b/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
index 44a17a3..e65265e 100644
--- a/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
+++ b/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -126,8 +126,9 @@ void PseudoLoweringEmitter::addOperandMapping(
auto &Entry = OperandMap[MIOpNo];
Entry.Kind = OpData::Imm;
Entry.Data.Imm = *BI->convertInitializerToInt();
- } else
+ } else {
llvm_unreachable("Unhandled pseudo-expansion argument type!");
+ }
}
void PseudoLoweringEmitter::evaluateExpansion(const Record *Rec) {
@@ -281,8 +282,9 @@ void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) {
<< " }\n";
}
o << " }\n return true;";
- } else
+ } else {
o << " return false;";
+ }
o << "\n}\n\n";
}
diff --git a/llvm/utils/TableGen/RegisterBankEmitter.cpp b/llvm/utils/TableGen/RegisterBankEmitter.cpp
index e931000..e00b06c 100644
--- a/llvm/utils/TableGen/RegisterBankEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterBankEmitter.cpp
@@ -370,8 +370,9 @@ void RegisterBankEmitter::emitBaseClassImplementation(
if (HasAmbigousOrMissingEntry) {
OS << " if (RegBankID != InvalidRegBankID)\n"
" return getRegBank(RegBankID);\n";
- } else
+ } else {
OS << " return getRegBank(RegBankID);\n";
+ }
OS << " }\n"
" llvm_unreachable(llvm::Twine(\"Target needs to handle register "
"class ID "
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index 98f0d7e..e283f1c49 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -937,7 +937,7 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS) {
unsigned i = 0;
for (auto I = Regs.begin(), E = Regs.end(); I != E; ++I, ++i) {
const auto &Reg = *I;
- RegStrings.add(std::string(Reg.getName()));
+ RegStrings.add(Reg.getName().str());
// Compute the ordered sub-register list.
SetVector<const CodeGenRegister *> SR;
@@ -974,7 +974,7 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS) {
OS << "namespace llvm {\n\n";
- const std::string &TargetName = std::string(Target.getName());
+ const std::string &TargetName = Target.getName().str();
// Emit the shared table of differential lists.
OS << "extern const int16_t " << TargetName << "RegDiffLists[] = {\n";
@@ -1009,7 +1009,7 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS) {
constexpr unsigned RegUnitBits = 12;
assert(isUInt<RegUnitBits>(FirstRU) && "Too many regunits");
assert(isUInt<32 - RegUnitBits>(Offset) && "Offset is too big");
- OS << " { " << RegStrings.get(std::string(Reg.getName())) << ", "
+ OS << " { " << RegStrings.get(Reg.getName().str()) << ", "
<< DiffSeqs.get(SubRegLists[i]) << ", " << DiffSeqs.get(SuperRegLists[i])
<< ", " << SubRegIdxSeqs.get(SubRegIdxLists[i]) << ", "
<< (Offset << RegUnitBits | FirstRU) << ", "
@@ -1144,7 +1144,7 @@ void RegisterInfoEmitter::runTargetHeader(raw_ostream &OS) {
OS << "\n#ifdef GET_REGINFO_HEADER\n";
OS << "#undef GET_REGINFO_HEADER\n\n";
- const std::string &TargetName = std::string(Target.getName());
+ const std::string &TargetName = Target.getName().str();
std::string ClassName = TargetName + "GenRegisterInfo";
OS << "#include \"llvm/CodeGen/TargetRegisterInfo.h\"\n\n";
@@ -1472,7 +1472,7 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS) {
OS << "} // end anonymous namespace\n";
// Emit extra information about registers.
- const std::string &TargetName = std::string(Target.getName());
+ const std::string &TargetName = Target.getName().str();
const auto &Regs = RegBank.getRegisters();
unsigned NumRegCosts = 1;
for (const auto &Reg : Regs)
diff --git a/llvm/utils/TableGen/SearchableTableEmitter.cpp b/llvm/utils/TableGen/SearchableTableEmitter.cpp
index 2efa04d..76475f6 100644
--- a/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -62,7 +62,7 @@ struct GenericField {
bool IsInstruction = false;
GenericEnum *Enum = nullptr;
- GenericField(StringRef Name) : Name(std::string(Name)) {}
+ GenericField(StringRef Name) : Name(Name.str()) {}
};
struct SearchIndex {
@@ -118,7 +118,7 @@ private:
const Init *I) {
if (const StringInit *SI = dyn_cast<StringInit>(I)) {
if (Field.IsCode || SI->hasCodeFormat())
- return std::string(SI->getValue());
+ return SI->getValue().str();
else
return SI->getAsString();
} else if (const BitsInit *BI = dyn_cast<BitsInit>(I))
@@ -134,7 +134,7 @@ private:
if (!Entry)
PrintFatalError(Loc,
Twine("Entry for field '") + Field.Name + "' is null");
- return std::string(Entry->first);
+ return Entry->first.str();
}
PrintFatalError(Loc, Twine("invalid field type for field '") + Field.Name +
"'; expected: bit, bits, string, or code");
@@ -300,7 +300,7 @@ bool SearchableTableEmitter::compareBy(const Record *LHS, const Record *RHS,
void SearchableTableEmitter::emitIfdef(StringRef Guard, raw_ostream &OS) {
OS << "#ifdef " << Guard << "\n";
- PreprocessorGuards.insert(std::string(Guard));
+ PreprocessorGuards.insert(Guard.str());
}
/// Emit a generic enum.
@@ -597,7 +597,7 @@ std::unique_ptr<SearchIndex> SearchableTableEmitter::parseSearchIndex(
GenericTable &Table, const RecordVal *KeyRecVal, StringRef Name,
ArrayRef<StringRef> Key, bool EarlyOut, bool ReturnRange) {
auto Index = std::make_unique<SearchIndex>();
- Index->Name = std::string(Name);
+ Index->Name = Name.str();
Index->Loc = KeyRecVal->getLoc();
Index->EarlyOut = EarlyOut;
Index->ReturnRange = ReturnRange;
@@ -728,8 +728,8 @@ void SearchableTableEmitter::run(raw_ostream &OS) {
ValueField = EnumRec->getValueAsString("ValueField");
auto Enum = std::make_unique<GenericEnum>();
- Enum->Name = std::string(EnumRec->getName());
- Enum->PreprocessorGuard = std::string(EnumRec->getName());
+ Enum->Name = EnumRec->getName().str();
+ Enum->PreprocessorGuard = EnumRec->getName().str();
StringRef FilterClass = EnumRec->getValueAsString("FilterClass");
Enum->Class = Records.getClass(FilterClass);
@@ -747,10 +747,10 @@ void SearchableTableEmitter::run(raw_ostream &OS) {
for (const auto *TableRec :
Records.getAllDerivedDefinitions("GenericTable")) {
auto Table = std::make_unique<GenericTable>();
- Table->Name = std::string(TableRec->getName());
+ Table->Name = TableRec->getName().str();
Table->Locs = TableRec->getLoc();
- Table->PreprocessorGuard = std::string(TableRec->getName());
- Table->CppTypeName = std::string(TableRec->getValueAsString("CppTypeName"));
+ Table->PreprocessorGuard = TableRec->getName().str();
+ Table->CppTypeName = TableRec->getValueAsString("CppTypeName").str();
std::vector<StringRef> Fields = TableRec->getValueAsListOfStrings("Fields");
for (const auto &FieldName : Fields) {
@@ -861,10 +861,10 @@ void SearchableTableEmitter::run(raw_ostream &OS) {
Table->Name = (Twine(Class->getName()) + "sList").str();
Table->Locs = Class->getLoc();
Table->PreprocessorGuard = Class->getName().upper();
- Table->CppTypeName = std::string(Class->getName());
+ Table->CppTypeName = Class->getName().str();
for (const RecordVal &Field : Class->getValues()) {
- std::string FieldName = std::string(Field.getName());
+ std::string FieldName = Field.getName().str();
// Skip uninteresting fields: either special to us, or injected
// template parameters (if they contain a ':').
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index 96cbfba..9c67424 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -579,7 +579,7 @@ void SubtargetEmitter::emitStageAndOperandCycleData(
std::string ItinStageString;
unsigned NStages = 0;
if (ItinData)
- formItineraryStageString(std::string(Name), ItinData, ItinStageString,
+ formItineraryStageString(Name.str(), ItinData, ItinStageString,
NStages);
// Get string and operand cycle count
@@ -590,7 +590,7 @@ void SubtargetEmitter::emitStageAndOperandCycleData(
formItineraryOperandCycleString(ItinData, ItinOperandCycleString,
NOperandCycles);
- formItineraryBypassString(std::string(Name), ItinData, ItinBypassString,
+ formItineraryBypassString(Name.str(), ItinData, ItinBypassString,
NOperandCycles);
}
@@ -1382,7 +1382,7 @@ void SubtargetEmitter::genSchedClassTables(const CodeGenProcModel &ProcModel,
for (unsigned I = 0, E = WriteLatencies.size(); I < E; ++I)
if (SchedTables.WriterNames[Idx + I].find(WriterNames[I]) ==
std::string::npos) {
- SchedTables.WriterNames[Idx + I] += std::string("_") + WriterNames[I];
+ SchedTables.WriterNames[Idx + I] += "_" + WriterNames[I];
}
} else {
SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
@@ -2085,8 +2085,9 @@ void SubtargetEmitter::run(raw_ostream &OS) {
if (SchedModels.hasItineraries()) {
OS << Target << "Stages, " << Target << "OperandCycles, " << Target
<< "ForwardingPaths";
- } else
+ } else {
OS << "nullptr, nullptr, nullptr";
+ }
OS << ");\n}\n\n";
OS << "} // end namespace llvm\n\n";
@@ -2216,8 +2217,9 @@ void SubtargetEmitter::run(raw_ostream &OS) {
if (SchedModels.hasItineraries()) {
OS << Target << "Stages, " << Target << "OperandCycles, " << Target
<< "ForwardingPaths";
- } else
+ } else {
OS << "nullptr, nullptr, nullptr";
+ }
OS << ") {}\n\n";
emitSchedModelHelpers(ClassName, OS);
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index dbfb926..402fc937 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -458,12 +458,12 @@ void RecognizableInstr::handleOperand(
StringRef typeName = (*Operands)[operandIndex].Rec->getName();
- OperandEncoding encoding = encodingFromString(std::string(typeName), OpSize);
+ OperandEncoding encoding = encodingFromString(typeName.str(), OpSize);
// Adjust the encoding type for an operand based on the instruction.
adjustOperandEncoding(encoding);
Spec->operands[operandIndex].encoding = encoding;
Spec->operands[operandIndex].type =
- typeFromString(std::string(typeName), HasREX_W, OpSize);
+ typeFromString(typeName.str(), HasREX_W, OpSize);
++operandIndex;
++physicalOperandIndex;
diff --git a/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn
index 01aacc6..acb4354 100644
--- a/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/AST/BUILD.gn
@@ -29,6 +29,7 @@ static_library("AST") {
"//clang/lib/Basic",
"//clang/lib/Lex",
"//llvm/lib/BinaryFormat",
+ "//llvm/lib/Frontend/HLSL",
"//llvm/lib/Frontend/OpenMP",
"//llvm/lib/Support",
"//llvm/lib/TargetParser",
diff --git a/llvm/utils/gn/secondary/lldb/test/BUILD.gn b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
index 30e53d6..07f463ca1 100644
--- a/llvm/utils/gn/secondary/lldb/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
@@ -181,7 +181,7 @@ group("test") {
"//clang/tools/driver:symlinks",
"//lld/tools/lld:symlinks",
"//lldb/tools/driver:lldb",
- "//lldb/tools/lldb-dap",
+ "//lldb/tools/lldb-dap/tool:lldb-dap",
# XXX lldb-instr, darwin-debug, etc
"//lldb/tools/lldb-server",
diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
index 96d771e..76c48f4 100644
--- a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
@@ -1,34 +1,13 @@
import("//llvm/utils/TableGen/tablegen.gni")
-import("//llvm/utils/gn/build/write_cmake_config.gni")
-import("//llvm/version.gni")
-tablegen("Options") {
- visibility = [ ":lldb-dap" ]
- args = [ "-gen-opt-parser-defs" ]
-}
-
-if (host_os == "mac") {
- write_cmake_config("write_info_plist") {
- input = "lldb-dap-Info.plist.in"
- output = "$target_gen_dir/lldb-dap-Info.plist"
- values = [ "LLDB_VERSION=$llvm_version" ]
- }
-}
-
-executable("lldb-dap") {
+static_library("lib") {
+ output_name = "lldbDAP"
configs += [ "//llvm/utils/gn/build:lldb_code" ]
deps = [
- ":Options",
"//lldb/source/API:liblldb",
"//llvm/lib/Option",
"//llvm/lib/Support",
]
- if (host_os == "mac") {
- deps += [ ":write_info_plist" ]
- plist = get_target_outputs(":write_info_plist")
- ldflags = [ "-Wl,-sectcreate,__TEXT,__info_plist," +
- rebase_path(plist[0], root_out_dir) ]
- }
if (current_os == "win") {
libs = [ "ws2_32.lib" ]
}
@@ -98,6 +77,5 @@ executable("lldb-dap") {
"SourceBreakpoint.cpp",
"Transport.cpp",
"Watchpoint.cpp",
- "lldb-dap.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/tool/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/tool/BUILD.gn
new file mode 100644
index 0000000..8b764843
--- /dev/null
+++ b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/tool/BUILD.gn
@@ -0,0 +1,35 @@
+import("//llvm/utils/TableGen/tablegen.gni")
+import("//llvm/utils/gn/build/write_cmake_config.gni")
+import("//llvm/version.gni")
+
+tablegen("Options") {
+ visibility = [ ":lldb-dap" ]
+ args = [ "-gen-opt-parser-defs" ]
+ td_file = "../Options.td"
+}
+
+if (host_os == "mac") {
+ write_cmake_config("write_info_plist") {
+ input = "lldb-dap-Info.plist.in"
+ output = "$target_gen_dir/lldb-dap-Info.plist"
+ values = [ "LLDB_VERSION=$llvm_version" ]
+ }
+}
+
+executable("lldb-dap") {
+ configs += [ "//llvm/utils/gn/build:lldb_code" ]
+ deps = [
+ ":Options",
+ "//lldb/tools/lldb-dap:lib",
+ ]
+ if (host_os == "mac") {
+ deps += [ ":write_info_plist" ]
+ plist = get_target_outputs(":write_info_plist")
+ ldflags = [ "-Wl,-sectcreate,__TEXT,__info_plist," +
+ rebase_path(plist[0], root_out_dir) ]
+ }
+
+ include_dirs = [ ".." ]
+
+ sources = [ "lldb-dap.cpp" ]
+}
diff --git a/llvm/utils/gn/secondary/llvm/lib/DebugInfo/GSYM/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/DebugInfo/GSYM/BUILD.gn
index cadd34d..157fa6e 100644
--- a/llvm/utils/gn/secondary/llvm/lib/DebugInfo/GSYM/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/DebugInfo/GSYM/BUILD.gn
@@ -12,6 +12,7 @@ static_library("GSYM") {
"FileWriter.cpp",
"FunctionInfo.cpp",
"GsymCreator.cpp",
+ "GsymDIContext.cpp",
"GsymReader.cpp",
"Header.cpp",
"InlineInfo.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
index cdde768..facdde2 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
@@ -176,6 +176,7 @@ static_library("LLVMAMDGPUCodeGen") {
"AMDGPUPostLegalizerCombiner.cpp",
"AMDGPUPreLegalizerCombiner.cpp",
"AMDGPUPreloadKernArgProlog.cpp",
+ "AMDGPUPreloadKernelArguments.cpp",
"AMDGPUPrintfRuntimeBinding.cpp",
"AMDGPUPromoteAlloca.cpp",
"AMDGPUPromoteKernelArguments.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn
index 28808d2..2838901 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn
@@ -12,10 +12,17 @@ tablegen("AVRGenDAGISel") {
td_file = "AVR.td"
}
+tablegen("AVRGenSDNodeInfo") {
+ visibility = [ ":LLVMAVRCodeGen" ]
+ args = [ "-gen-sd-node-info" ]
+ td_file = "AVR.td"
+}
+
static_library("LLVMAVRCodeGen") {
deps = [
":AVRGenCallingConv",
":AVRGenDAGISel",
+ ":AVRGenSDNodeInfo",
"MCTargetDesc",
"TargetInfo",
"//llvm/include/llvm/Config:llvm-config",
@@ -37,6 +44,7 @@ static_library("LLVMAVRCodeGen") {
"AVRInstrInfo.cpp",
"AVRMCInstLower.cpp",
"AVRRegisterInfo.cpp",
+ "AVRSelectionDAGInfo.cpp",
"AVRShiftExpand.cpp",
"AVRSubtarget.cpp",
"AVRTargetMachine.cpp",
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
index ade2b64..790d2e7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
@@ -79,6 +79,9 @@ def LoopVectorizeAttr : LLVM_Attr<"LoopVectorize", "loop_vectorize"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopInterleaveAttr : LLVM_Attr<"LoopInterleave", "loop_interleave"> {
@@ -92,6 +95,9 @@ def LoopInterleaveAttr : LLVM_Attr<"LoopInterleave", "loop_interleave"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopUnrollAttr : LLVM_Attr<"LoopUnroll", "loop_unroll"> {
@@ -111,6 +117,9 @@ def LoopUnrollAttr : LLVM_Attr<"LoopUnroll", "loop_unroll"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopUnrollAndJamAttr : LLVM_Attr<"LoopUnrollAndJam", "loop_unroll_and_jam"> {
@@ -130,6 +139,9 @@ def LoopUnrollAndJamAttr : LLVM_Attr<"LoopUnrollAndJam", "loop_unroll_and_jam">
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopLICMAttr : LLVM_Attr<"LoopLICM", "loop_licm"> {
@@ -145,6 +157,9 @@ def LoopLICMAttr : LLVM_Attr<"LoopLICM", "loop_licm"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopDistributeAttr : LLVM_Attr<"LoopDistribute", "loop_distribute"> {
@@ -162,6 +177,9 @@ def LoopDistributeAttr : LLVM_Attr<"LoopDistribute", "loop_distribute"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopPipelineAttr : LLVM_Attr<"LoopPipeline", "loop_pipeline"> {
@@ -176,6 +194,9 @@ def LoopPipelineAttr : LLVM_Attr<"LoopPipeline", "loop_pipeline"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopPeeledAttr : LLVM_Attr<"LoopPeeled", "loop_peeled"> {
@@ -189,6 +210,9 @@ def LoopPeeledAttr : LLVM_Attr<"LoopPeeled", "loop_peeled"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopUnswitchAttr : LLVM_Attr<"LoopUnswitch", "loop_unswitch"> {
@@ -202,6 +226,9 @@ def LoopUnswitchAttr : LLVM_Attr<"LoopUnswitch", "loop_unswitch"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LoopAnnotationAttr : LLVM_Attr<"LoopAnnotation", "loop_annotation"> {
@@ -232,6 +259,9 @@ def LoopAnnotationAttr : LLVM_Attr<"LoopAnnotation", "loop_annotation"> {
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -311,6 +341,9 @@ def LLVM_DIExpressionAttr : LLVM_Attr<"DIExpression", "di_expression"> {
def LLVM_DINullTypeAttr : LLVM_Attr<"DINullType", "di_null_type",
/*traits=*/[], "DITypeAttr"> {
let parameters = (ins);
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -336,6 +369,9 @@ def LLVM_DIBasicTypeAttr : LLVM_Attr<"DIBasicType", "di_basic_type",
}]>
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -365,6 +401,9 @@ def LLVM_DICompileUnitAttr : LLVM_Attr<"DICompileUnit", "di_compile_unit",
}]>
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -422,6 +461,9 @@ def LLVM_DICompositeTypeAttr : LLVM_Attr<"DICompositeType", "di_composite_type",
/// @}
}];
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -441,6 +483,9 @@ def LLVM_DIDerivedTypeAttr : LLVM_Attr<"DIDerivedType", "di_derived_type",
OptionalParameter<"DINodeAttr">:$extraData
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -455,6 +500,9 @@ def LLVM_DIFileAttr : LLVM_Attr<"DIFile", "di_file", /*traits=*/[], "DIScopeAttr
}]>
];
let assemblyFormat = "`<` $name `in` $directory `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -469,6 +517,9 @@ def LLVM_DIGlobalVariableExpressionAttr
);
let assemblyFormat = "`<` struct(params) `>`";
let constBuilderCall = "$0";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def DIGlobalVariableExpressionArrayAttr :
@@ -492,6 +543,9 @@ def LLVM_DIGlobalVariable : LLVM_Attr<"DIGlobalVariable", "di_global_variable",
OptionalParameter<"bool">:$isDefined,
OptionalParameter<"unsigned">:$alignInBits);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -515,6 +569,9 @@ def LLVM_DILexicalBlockAttr : LLVM_Attr<"DILexicalBlock", "di_lexical_block",
}]>
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -536,6 +593,9 @@ def LLVM_DILexicalBlockFile : LLVM_Attr<"DILexicalBlockFile", "di_lexical_block_
}]>
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -566,6 +626,9 @@ def LLVM_DILocalVariableAttr : LLVM_Attr<"DILocalVariable", "di_local_variable",
}]>
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -619,6 +682,9 @@ def LLVM_DISubprogramAttr : LLVM_Attr<"DISubprogram", "di_subprogram",
/// @}
}];
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -639,6 +705,9 @@ def LLVM_DIModuleAttr : LLVM_Attr<"DIModule", "di_module",
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -654,6 +723,9 @@ def LLVM_DINamespaceAttr : LLVM_Attr<"DINamespace", "di_namespace",
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -673,6 +745,9 @@ def LLVM_DIImportedEntityAttr : LLVM_Attr<"DIImportedEntity", "di_imported_entit
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -719,6 +794,9 @@ def LLVM_DICommonBlockAttr : LLVM_Attr<"DICommonBlock", "di_common_block",
OptionalParameter<"unsigned">:$line
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -753,6 +831,9 @@ def LLVM_DISubroutineTypeAttr : LLVM_Attr<"DISubroutineType", "di_subroutine_typ
}]>
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -778,6 +859,9 @@ def LLVM_DILabelAttr : LLVM_Attr<"DILabel", "di_label",
];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -797,6 +881,9 @@ def LLVM_DIStringTypeAttr : LLVM_Attr<"DIStringType", "di_string_type",
LLVM_DIEncodingParameter:$encoding
);
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -845,6 +932,9 @@ def LLVM_AliasScopeDomainAttr : LLVM_Attr<"AliasScopeDomain",
}];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -902,6 +992,9 @@ def LLVM_AliasScopeAttr : LLVM_Attr<"AliasScope", "alias_scope"> {
let assemblyFormat = "`<` struct(params) `>`";
let genVerifyDecl = 1;
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LLVM_AliasScopeArrayAttr
@@ -937,6 +1030,9 @@ def LLVM_AccessGroupAttr : LLVM_Attr<"AccessGroup", "access_group"> {
}];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LLVM_AccessGroupArrayAttr
@@ -967,6 +1063,9 @@ def LLVM_TBAARootAttr : LLVM_Attr<"TBAARoot", "tbaa_root", [], "TBAANodeAttr"> {
}];
let assemblyFormat = "(`<` struct(params)^ `>`)?";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -1040,6 +1139,9 @@ def LLVM_TBAATypeDescriptorAttr : LLVM_Attr<"TBAATypeDescriptor",
}];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
//===----------------------------------------------------------------------===//
@@ -1082,6 +1184,9 @@ def LLVM_TBAATagAttr : LLVM_Attr<"TBAATag", "tbaa_tag"> {
}];
let assemblyFormat = "`<` struct(params) `>`";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
def LLVM_TBAATagArrayAttr
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 6540273..654aff7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -3445,6 +3445,70 @@ def NVVM_Tcgen05StOp : NVVM_Op<"tcgen05.st"> {
}
//===----------------------------------------------------------------------===//
+// NVVM dot.accumulate.4way Op
+//===----------------------------------------------------------------------===//
+
+def DotAccumulate4WayS8 : I32EnumAttrCase<"S8", 1, "s8">;
+def DotAccumulate4WayU8 : I32EnumAttrCase<"U8", 0, "u8">;
+
+def DotAccumulate4WayType : I32EnumAttr<"DotAccumulate4WayType",
+ "NVVM DotAccumulate4WayType",
+ [DotAccumulate4WayS8, DotAccumulate4WayU8]> {
+ let cppNamespace = "::mlir::NVVM";
+ let genSpecializedAttr = 0;
+}
+
+def DotAccumulate4WayTypeAttr : EnumAttr<NVVM_Dialect, DotAccumulate4WayType, "dot_accumulate_4way_type"> {
+ let assemblyFormat = "`<` $value `>`";
+}
+
+def NVVM_DotAccumulate4WayOp : NVVM_Op<"dot.accumulate.4way"> {
+ let summary = "Four-way byte dot product-accumulate instruction.";
+ let description = [{
+ Performs a four-way byte dot-product which is accumulated in a 32-bit
+ result.
+ Operand `a` and `b` are vectors of 4 bytes between which the dot product is
+ computed.
+ The `a_type` and `b_type` attributes specify the type of the elements in `a`
+ and `b` respectively.
+ If `a_type` or `b_type` is `s8`, then the elements in the corresponding
+ vector are sign-extended to 32-bit before the dot product is computed.
+ If `a_type` or `b_type` is `u8`, then the elements in the corresponding
+ vector are zero-extended to 32-bit instead.
+ Operand `c` is a 32-bit integer to which the result is accumulated. It is
+ treated as holding a signed integer if any of `a_type` or `b_type` is `s8`.
+
+ [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/#integer-arithmetic-instructions-dp4a)
+ }];
+
+ let arguments = (ins
+ VectorOfLengthAndType<[4], [I8]>:$a,
+ DotAccumulate4WayTypeAttr:$a_type,
+ VectorOfLengthAndType<[4], [I8]>:$b,
+ DotAccumulate4WayTypeAttr:$b_type,
+ I32:$c
+ );
+
+ let results = (outs I32:$res);
+
+ let assemblyFormat = "$a $a_type `,` $b $b_type `,` $c attr-dict `:` type($a) `,` type($b)";
+
+ let extraClassDeclaration = [{
+ static llvm::Intrinsic::ID
+ getIntrinsicID(NVVM::DotAccumulate4WayType a_type,
+ NVVM::DotAccumulate4WayType b_type);
+ llvm::Value* getPackedArg(llvm::Value* arg, llvm::IRBuilderBase& builder);
+ }];
+
+ string llvmBuilder = [{
+ llvm::Intrinsic::ID id = NVVM::DotAccumulate4WayOp::getIntrinsicID($a_type, $b_type);
+ llvm::Value* argA = op.getPackedArg($a, builder);
+ llvm::Value* argB = op.getPackedArg($b, builder);
+ $res = createIntrinsicCall(builder, id, {argA, argB, $c});
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// NVVM target attribute.
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
index b44af2d..6344861 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
@@ -1718,76 +1718,6 @@ structured_op: !LinalgStructuredOpConfig
scalar_arg: BZp
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
- name: batch_reduce_matmul
- cpp_class_name: BatchReduceMatmulOp
- doc: |-
- Performs a batch-reduce matrix multiplication of two 3D inputs.
- The partial multiplication results are reduced into a 2D output.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- implements:
- - LinalgContractionOpInterface
-structured_op: !LinalgStructuredOpConfig
- args:
- - !LinalgOperandDefConfig
- name: A
- kind: input_tensor
- type_var: T1
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s1, s2)>
- - !LinalgOperandDefConfig
- name: B
- kind: input_tensor
- type_var: T2
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s2, s3)>
- - !LinalgOperandDefConfig
- name: C
- kind: output_tensor
- type_var: U
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s1, s3)>
- indexing_maps: !LinalgIndexingMapsConfig
- static_indexing_maps:
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d1, d3)>
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d3, d2)>
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d1, d2)>
- iterator_types:
- - reduction
- - parallel
- - parallel
- - reduction
- assignments:
- - !ScalarAssign
- arg: C
- value: !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: add
- operands:
- - !ScalarExpression
- scalar_arg: C
- - !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: mul
- operands:
- - !ScalarExpression
- scalar_fn:
- kind: type
- fn_name: cast_signed
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: A
- - !ScalarExpression
- scalar_fn:
- kind: type
- fn_name: cast_signed
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: B
---- !LinalgOpConfig
-metadata: !LinalgOpMetadata
name: matvec
cpp_class_name: MatvecOp
doc: |-
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index f3dbeb2..6178381 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -690,34 +690,32 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [
Example Transpose:
```mlir
- linalg.matmul indexing_maps = [
- affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
- affine_map<(d0, d1, d2) -> (d2, d1)>,
- affine_map<(d0, d1, d2) -> (d0, d1)>
- ]
- ins(%arg0, %arg1 : memref<5x3xf32>,memref<5x7xf32>)
- outs(%arg2: memref<3x7xf32>)
+ linalg.matmul
+ indexing_maps = [affine_map<(m, n, k) -> (k, m)>, // transpose
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<5x3xf32>,memref<5x7xf32>)
+ outs(%arg2: memref<3x7xf32>)
```
Example Broadcast:
- ```mlir
- linalg.matmul indexing_maps = [
- affine_map<(d0, d1, d2) -> (d2)>, // broadcast
- affine_map<(d0, d1, d2) -> (d2, d1)>,
- affine_map<(d0, d1, d2) -> (d0, d1)>
- ]
- ins(%arg0, %arg1 : memref<3xf32>, memref<5x7xf32>)
- outs(%arg2: memref<3x7xf32>)
+ ```mlir
+ linalg.matmul
+ indexing_maps = [affine_map<(m, n, k) -> (k)>, // broadcast
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<3xf32>, memref<5x7xf32>)
+ outs(%arg2: memref<3x7xf32>)
```
Example Broadcast and transpose:
```mlir
- linalg.matmul indexing_maps = [
- affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
- affine_map<(d0, d1, d2) -> (d2)>, // broadcast
- affine_map<(d0, d1, d2) -> (d0, d1)>
- ]
- ins(%arg0, %arg1 : memref<5x3xf32>, memref<7xf32>) outs(%arg2: memref<3x7xf32>)
+ linalg.matmul
+ indexing_maps = [affine_map<(m, n, k) -> (k, m)>, // transpose
+ affine_map<(m, n, k) -> (k)>, // broadcast
+ affine_map<(m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<5x3xf32>, memref<7xf32>)
+ outs(%arg2: memref<3x7xf32>)
```
}];
@@ -775,7 +773,7 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [
static void regionBuilder(ImplicitLocOpBuilder &b,
Block &block, ArrayRef<NamedAttribute> attrs);
- /// Returns a list of AffineMap with the typical matmul indexing charactristic.
+ /// Returns a list of AffineMap with the default matmul indexing characteristic.
static SmallVector<AffineMap> getDefaultIndexingMaps(MLIRContext *context);
/// Returns true if the given broadcast map \p bcastMap is valid for this op.
@@ -954,35 +952,32 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz
Example Transpose:
```mlir
- linalg.batch_matmul indexing_maps = [
- affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // transpose
- affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
- affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
- ]
- ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
- outs(%arg2: memref<2x3x7xf32>)
+ linalg.batch_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, k, m)>, // transpose
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (batch, m, n)>]
+ ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
+ outs(%arg2: memref<2x3x7xf32>)
```
Example Broadcast:
```mlir
- linalg.batch_matmul indexing_maps = [
- affine_map<(d0, d1, d2, d3) -> (d3)>, // broadcast
- affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
- affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
- ]
- ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
- outs(%arg2: memref<2x3x7xf32>)
+ linalg.batch_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (k)>, // broadcast
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (batch, m, n)>]
+ ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
+ outs(%arg2: memref<2x3x7xf32>)
```
Example Broadcast and Transpose:
```mlir
- linalg.batch_matmul indexing_maps = [
- affine_map<(d0, d1, d2, d3) -> (d1, d3)>, // broadcast
- affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, // transpose
- affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
- ]
- ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
- outs(%arg2: memref<2x3x7xf32>)
+ linalg.batch_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (m, k)>, // broadcast
+ affine_map<(batch, m, n, k) -> (batch, n, k)>, // transpose
+ affine_map<(batch, m, n, k) -> (batch, m, n)>]
+ ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
+ outs(%arg2: memref<2x3x7xf32>)
```
}];
@@ -1066,6 +1061,134 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz
//===----------------------------------------------------------------------===//
+// Op definition for BatchReduceMatmulOp
+//===----------------------------------------------------------------------===//
+
+def BatchReduceMatmulOp : LinalgStructuredBase_Op<"batch_reduce_matmul", [
+ AttrSizedOperandSegments,
+ LinalgContractionOpInterface]> {
+
+ let summary = [{Performs a batch-reduce matrix multiplication on two inputs.
+ The partial multiplication results are reduced into a 2D output.}];
+ let description = [{
+ Numeric casting is performed on the operands to the inner multiply,
+ promoting them to the same data type as the accumulator/output.
+
+ Broadcast and Transpose semantics can be applied by specifying the explicit attribute
+ 'indexing_maps' as shown below. This is a list attribute, so must include maps for all
+ arguments if specified.
+
+ Example Transpose:
+ ```mlir
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, k, m)>, // transpose
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
+ outs(%arg2: memref<3x7xf32>)
+ ```
+
+ Example Broadcast:
+ ```mlir
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (k)>, // broadcast
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
+ outs(%arg2: memref<3x7xf32>)
+ ```
+
+ Example Broadcast and Transpose:
+ ```mlir
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (m, k)>, // broadcast
+ affine_map<(batch, m, n, k) -> (batch, n, k)>, // transpose
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
+ outs(%arg2: memref<3x7xf32>)
+ ```
+ }];
+
+ let arguments = (ins
+ Variadic<AnyType>:$inputs,
+ Variadic<AnyShaped>:$outputs,
+ DefaultValuedOptionalAttr<
+ AffineMapArrayAttr,
+ "BatchReduceMatmulOp::getDefaultIndexingMaps($_builder.getContext())"
+ >:$indexing_maps,
+ DefaultValuedOptionalAttr<TypeFnAttr, "TypeFn::cast_signed">:$cast
+ );
+ let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
+ let regions = (region AnyRegion:$region);
+
+ let skipDefaultBuilders = 1;
+ let builders = [
+ OpBuilder<
+ (ins "ValueRange":$inputs, "ValueRange":$outputs,
+ CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
+ [{
+ buildBatchReduceMatmulOp($_builder, $_state, std::nullopt, inputs, outputs,
+ attributes, BatchReduceMatmulOp::getRegionBuilder(),
+ BatchReduceMatmulOp::getDefaultIndexingMaps($_builder.getContext()));
+ }]>,
+ OpBuilder<
+ (ins "TypeRange":$resultTensorTypes, "ValueRange":$inputs,
+ "ValueRange":$outputs,
+ CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
+ [{
+ buildBatchReduceMatmulOp($_builder, $_state, resultTensorTypes,
+ inputs, outputs, attributes, BatchReduceMatmulOp::getRegionBuilder(),
+ BatchReduceMatmulOp::getDefaultIndexingMaps($_builder.getContext()));
+ }]>,
+ OpBuilder<
+ (ins "TypeRange":$resultTensorTypes, "ValueRange":$inputs,
+ "ValueRange":$outputs,
+ "Attribute":$cast, CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
+ [{
+ $_state.addAttribute("cast", cast);
+ buildBatchReduceMatmulOp($_builder, $_state, resultTensorTypes, inputs, outputs,
+ attributes, BatchReduceMatmulOp::getRegionBuilder(),
+ BatchReduceMatmulOp::getDefaultIndexingMaps($_builder.getContext()));
+ }]>
+
+ ];
+ let hasCustomAssemblyFormat = 1;
+ let hasFolder = 1;
+ let hasVerifier = 1;
+
+ let extraClassDeclaration = structuredOpsBaseDecls # [{
+ SmallVector<utils::IteratorType> getIteratorTypesArray();
+
+ /// Implements the block region builder.
+ static void regionBuilder(ImplicitLocOpBuilder &b,
+ Block &block, ArrayRef<NamedAttribute> attrs);
+
+ /// Returns a list of AffineMap with the default batch_reduce_matmul indexing characteristic.
+ static SmallVector<AffineMap> getDefaultIndexingMaps(MLIRContext *context);
+
+ /// Returns true if the given broadcast map \p bcastMap is valid for this op.
+ bool isValidLhsRhsBroadcastMap(AffineMap bcastMap, bool isLHS = true);
+
+ static std::function<void(ImplicitLocOpBuilder &,
+ Block &, ArrayRef<NamedAttribute>)>
+ getRegionBuilder() {
+ return regionBuilder;
+ }
+
+ ::mlir::MutableOperandRange getDpsInitsMutable() {
+ return getOutputsMutable();
+ }
+
+ // Generic methods.
+ static unsigned getNumRegionArgs();
+ std::string getLibraryCallName();
+ bool hasDynamicIndexingMaps() { return true; };
+ /// Returns true if the user defined indexing maps are not equal to default maps.
+ bool hasUserDefinedMaps();
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// Named Linalg ops, implemented as a declarative configurations of generic ops.
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramAttributes.td b/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramAttributes.td
index eb6e293..9b0020e 100644
--- a/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramAttributes.td
+++ b/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramAttributes.td
@@ -40,6 +40,9 @@ def MLProgram_ExternAttr : MLProgram_Attr<"Extern", [TypedAttrInterface]> {
let parameters = (ins AttributeSelfTypeParameter<"">:$type);
let mnemonic = "extern";
let assemblyFormat = "";
+
+ // Generate mnemonic alias for the attribute.
+ let genMnemonicAlias = 1;
}
#endif // MLPROGRAM_ATTRIBUTES
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 4d49e52..3aefcea 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1273,7 +1273,7 @@ def Vector_TransferReadOp :
AttrSizedOperandSegments,
DestinationStyleOpInterface
]>,
- Arguments<(ins AnyShaped:$source,
+ Arguments<(ins AnyShaped:$base,
Variadic<Index>:$indices,
AffineMapAttr:$permutation_map,
AnyType:$padding,
@@ -1522,7 +1522,7 @@ def Vector_TransferWriteOp :
DestinationStyleOpInterface
]>,
Arguments<(ins AnyVectorOfAnyRank:$valueToStore,
- AnyShaped:$source,
+ AnyShaped:$base,
Variadic<Index>:$indices,
AffineMapAttr:$permutation_map,
Optional<VectorOfNonZeroRankOf<[I1]>>:$mask,
@@ -1663,7 +1663,7 @@ def Vector_TransferWriteOp :
/// ops of other dialects.
Value getValue() { return getVector(); }
- MutableOperandRange getDpsInitsMutable() { return getSourceMutable(); }
+ MutableOperandRange getDpsInitsMutable() { return getBaseMutable(); }
}];
let hasFolder = 1;
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
index 6d04ee5..032ce5b 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
@@ -303,7 +303,6 @@ def XeGPU_LayoutAttr : XeGPUAttr<"Layout", "layout"> {
return LayoutAttr::get(getContext(), getSgLayout(), getSgData(), nullptr,
getLaneLayout(), getLaneData(), getOrder());
}
-
}];
let assemblyFormat = "`<` struct(params) `>`";
diff --git a/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h b/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h
index 3e94021..559cc3e 100644
--- a/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h
@@ -9,16 +9,80 @@
#ifndef MLIR_DIALECT_XEGPU_TRANSFORMS_TRANSFORMS_H
#define MLIR_DIALECT_XEGPU_TRANSFORMS_TRANSFORMS_H
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/LogicalResult.h"
+#include "mlir/IR/Operation.h"
+
+#include <functional>
+#include <optional>
+#include <utility>
+
namespace mlir {
class RewritePatternSet;
namespace xegpu {
+/// Options to control the XeGPU unrolling. Its main purpose is to
+/// provide a way to customize the native shape of the operation.
+struct UnrollOptions {
+ /// Callback function that indicates whether vector unrolling should be
+ /// attempted on the operation.
+ using FilterConstraintFnType = std::function<LogicalResult(Operation *op)>;
+ FilterConstraintFnType filterConstraint = nullptr;
+ UnrollOptions &setFilterConstraint(FilterConstraintFnType constraint) {
+ filterConstraint = std::move(constraint);
+ return *this;
+ }
+
+ /// Function that computes the target shape for unrolling. It returns an
+ /// optional vector of integers representing the shape. If it returns
+ /// `std::nullopt`, unrolling is aborted for the given operation.
+ using NativeShapeFnType =
+ std::function<std::optional<SmallVector<int64_t>>(Operation *op)>;
+ NativeShapeFnType nativeShape = nullptr;
+ UnrollOptions &setNativeShapeFn(NativeShapeFnType fn) {
+ nativeShape = std::move(fn);
+ return *this;
+ }
+
+ /// Function that converts a ShapedType (TensorDescType or VectorType)
+ /// into the unrolled type based on the tileShape. It returns a vector of
+ /// types representing the unrolled types for simplicity.
+ using UnrolledTypeFnType = std::function<SmallVector<Type>(
+ ShapedType type, ArrayRef<int64_t> tileShape)>;
+ UnrolledTypeFnType getUnrolledTypes = nullptr;
+ UnrollOptions &setUnrolledTypesFn(UnrolledTypeFnType fn) {
+ getUnrolledTypes = std::move(fn);
+ return *this;
+ }
+};
+
/// Appends patterns for folding aliasing ops into XeGPU ops into `patterns`.
void populateXeGPUFoldAliasOpsPatterns(RewritePatternSet &patterns);
+
/// Appends patterns for XeGPU SIMT distribution into `patterns`.
void populateXeGPUSubgroupDistributePatterns(RewritePatternSet &patterns);
+/// Collect a set of patterns to unroll xegpu operations into smaller shapes.
+/// Users can control whether an operation is to be unrolled or not, as well as
+/// its target shape via `options` structure. (via setting filterConstraint
+/// and nativeShape respectively, both of them are function refs taking `op` as
+/// input).
+/// An `op` is unrolled to the `targetShape` as follows, for each of its
+/// operands:
+/// 1. the unrolled type `unrolledType` and number of unrolled instances
+/// `numUnrolledInstances` are computed from the `targetShape`.
+/// 2. pack each operand. ExtractStridedSlice ops are created to break up the
+///    vector operands, and BuiltinUnrealizedCastOp ops are created to break up
+///    the TensorDesc operands.
+/// 3. the original op is cloned `numUnrolledInstances` times, once for each
+/// result.
+/// 4. unpack the results. InsertStridedSlice are inserted for VectorType
+/// result, and BuiltinUnrealizedCastOp are inserted for TensorDescType result
+/// to re-assemble the slices into the original shape.
+void populateXeGPUUnrollPatterns(RewritePatternSet &patterns,
+ const UnrollOptions &options);
+
} // namespace xegpu
} // namespace mlir
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index 95d9441..68ab152 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -679,8 +679,7 @@ public:
if (numRegions == 0)
return MutableArrayRef<Region>();
- auto *regions = getTrailingObjects<Region>();
- return {regions, numRegions};
+ return getTrailingObjects<Region>(numRegions);
}
/// Returns the region held by this operation at position 'index'.
@@ -694,7 +693,7 @@ public:
//===--------------------------------------------------------------------===//
MutableArrayRef<BlockOperand> getBlockOperands() {
- return {getTrailingObjects<BlockOperand>(), numSuccs};
+ return getTrailingObjects<BlockOperand>(numSuccs);
}
// Successor iteration.
diff --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td
index 8ea9d92..c72ca58 100644
--- a/mlir/include/mlir/Interfaces/VectorInterfaces.td
+++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td
@@ -111,7 +111,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
TODO: Change name of operand, which is not accurate for xfer_write.
}],
/*retTy=*/"::mlir::Value",
- /*methodName=*/"getSource",
+ /*methodName=*/"getBase",
/*args=*/(ins)
>,
InterfaceMethod<
@@ -187,6 +187,12 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
return inBounds;
}
+ /// Wrapper for getBase, which replaced getSource.
+ [[deprecated("Use getBase instead!")]]
+ ::mlir::Value getSource() {
+ return $_op.getBase();
+ }
+
/// Return the number of leading shaped dimensions (of the "source" operand)
/// that do not participate in the permutation map.
unsigned getLeadingShapedRank() {
@@ -203,7 +209,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
/// Return the shaped type of the "source" operand value.
::mlir::ShapedType getShapedType() {
- return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType());
+ return ::llvm::cast<::mlir::ShapedType>($_op.getBase().getType());
}
/// Return the number of dimensions that participate in the permutation map.
diff --git a/mlir/include/mlir/Tools/PDLL/AST/Nodes.h b/mlir/include/mlir/Tools/PDLL/AST/Nodes.h
index f174ac2..9ad9483 100644
--- a/mlir/include/mlir/Tools/PDLL/AST/Nodes.h
+++ b/mlir/include/mlir/Tools/PDLL/AST/Nodes.h
@@ -183,10 +183,10 @@ public:
/// Return the children of this compound statement.
MutableArrayRef<Stmt *> getChildren() {
- return {getTrailingObjects<Stmt *>(), numChildren};
+ return getTrailingObjects(numChildren);
}
ArrayRef<Stmt *> getChildren() const {
- return const_cast<CompoundStmt *>(this)->getChildren();
+ return getTrailingObjects(numChildren);
}
ArrayRef<Stmt *>::iterator begin() const { return getChildren().begin(); }
ArrayRef<Stmt *>::iterator end() const { return getChildren().end(); }
@@ -275,10 +275,10 @@ public:
/// Return the replacement values of this statement.
MutableArrayRef<Expr *> getReplExprs() {
- return {getTrailingObjects<Expr *>(), numReplExprs};
+ return getTrailingObjects(numReplExprs);
}
ArrayRef<Expr *> getReplExprs() const {
- return const_cast<ReplaceStmt *>(this)->getReplExprs();
+ return getTrailingObjects(numReplExprs);
}
private:
@@ -400,12 +400,8 @@ public:
Expr *getCallableExpr() const { return callable; }
/// Return the arguments of this call.
- MutableArrayRef<Expr *> getArguments() {
- return {getTrailingObjects<Expr *>(), numArgs};
- }
- ArrayRef<Expr *> getArguments() const {
- return const_cast<CallExpr *>(this)->getArguments();
- }
+ MutableArrayRef<Expr *> getArguments() { return getTrailingObjects(numArgs); }
+ ArrayRef<Expr *> getArguments() const { return getTrailingObjects(numArgs); }
/// Returns whether the result of this call is to be negated.
bool getIsNegated() const { return isNegated; }
@@ -534,10 +530,10 @@ public:
/// Return the operands of this operation.
MutableArrayRef<Expr *> getOperands() {
- return {getTrailingObjects<Expr *>(), numOperands};
+ return getTrailingObjects<Expr *>(numOperands);
}
ArrayRef<Expr *> getOperands() const {
- return const_cast<OperationExpr *>(this)->getOperands();
+ return getTrailingObjects<Expr *>(numOperands);
}
/// Return the result types of this operation.
@@ -550,10 +546,10 @@ public:
/// Return the attributes of this operation.
MutableArrayRef<NamedAttributeDecl *> getAttributes() {
- return {getTrailingObjects<NamedAttributeDecl *>(), numAttributes};
+ return getTrailingObjects<NamedAttributeDecl *>(numAttributes);
}
- MutableArrayRef<NamedAttributeDecl *> getAttributes() const {
- return const_cast<OperationExpr *>(this)->getAttributes();
+ ArrayRef<NamedAttributeDecl *> getAttributes() const {
+ return getTrailingObjects<NamedAttributeDecl *>(numAttributes);
}
private:
@@ -594,10 +590,10 @@ public:
/// Return the element expressions of this range.
MutableArrayRef<Expr *> getElements() {
- return {getTrailingObjects<Expr *>(), numElements};
+ return getTrailingObjects(numElements);
}
ArrayRef<Expr *> getElements() const {
- return const_cast<RangeExpr *>(this)->getElements();
+ return getTrailingObjects(numElements);
}
/// Return the range result type of this expression.
@@ -627,10 +623,10 @@ public:
/// Return the element expressions of this tuple.
MutableArrayRef<Expr *> getElements() {
- return {getTrailingObjects<Expr *>(), getType().size()};
+ return getTrailingObjects(getType().size());
}
ArrayRef<Expr *> getElements() const {
- return const_cast<TupleExpr *>(this)->getElements();
+ return getTrailingObjects(getType().size());
}
/// Return the tuple result type of this expression.
@@ -916,10 +912,10 @@ public:
/// Return the input arguments of this constraint.
MutableArrayRef<VariableDecl *> getInputs() {
- return {getTrailingObjects<VariableDecl *>(), numInputs};
+ return getTrailingObjects<VariableDecl *>(numInputs);
}
ArrayRef<VariableDecl *> getInputs() const {
- return const_cast<UserConstraintDecl *>(this)->getInputs();
+ return getTrailingObjects<VariableDecl *>(numInputs);
}
/// Return the explicit native type to use for the given input. Returns
@@ -1126,16 +1122,16 @@ public:
/// Return the input arguments of this rewrite.
MutableArrayRef<VariableDecl *> getInputs() {
- return {getTrailingObjects<VariableDecl *>(), numInputs};
+ return getTrailingObjects(numInputs);
}
ArrayRef<VariableDecl *> getInputs() const {
- return const_cast<UserRewriteDecl *>(this)->getInputs();
+ return getTrailingObjects(numInputs);
}
/// Return the explicit results of the rewrite declaration. May be empty,
/// even if the rewrite has results (e.g. in the case of inferred results).
MutableArrayRef<VariableDecl *> getResults() {
- return {getTrailingObjects<VariableDecl *>() + numInputs, numResults};
+ return {getTrailingObjects() + numInputs, numResults};
}
ArrayRef<VariableDecl *> getResults() const {
return const_cast<UserRewriteDecl *>(this)->getResults();
@@ -1257,10 +1253,10 @@ public:
/// Return the constraints of this variable.
MutableArrayRef<ConstraintRef> getConstraints() {
- return {getTrailingObjects<ConstraintRef>(), numConstraints};
+ return getTrailingObjects(numConstraints);
}
ArrayRef<ConstraintRef> getConstraints() const {
- return const_cast<VariableDecl *>(this)->getConstraints();
+ return getTrailingObjects(numConstraints);
}
/// Return the initializer expression of this statement, or nullptr if there
@@ -1304,10 +1300,10 @@ public:
/// Return the children of this module.
MutableArrayRef<Decl *> getChildren() {
- return {getTrailingObjects<Decl *>(), numChildren};
+ return getTrailingObjects(numChildren);
}
ArrayRef<Decl *> getChildren() const {
- return const_cast<Module *>(this)->getChildren();
+ return getTrailingObjects(numChildren);
}
private:
diff --git a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
index 097cb9c..e1fd20f 100644
--- a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
@@ -2398,8 +2398,9 @@ bool IntegerRelation::removeDuplicateConstraints() {
addEquality(getInequality(k));
removeInequality(k);
removeInequality(l);
- } else
+ } else {
*this = getEmpty(getSpace());
+ }
break;
}
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index dd16ec4..c52bf505 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -319,7 +319,6 @@ struct LowerGpuOpsToROCDLOpsPass final
{
RewritePatternSet patterns(ctx);
populateGpuRewritePatterns(patterns);
- arith::populateExpandBFloat16Patterns(patterns);
(void)applyPatternsGreedily(m, std::move(patterns));
}
diff --git a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
index 58b85bc..d6f9495 100644
--- a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
+++ b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
@@ -58,7 +58,7 @@ struct TransferReadToArmSMELowering
return rewriter.notifyMatchFailure(transferReadOp,
"not a valid vector type for SME");
- if (!llvm::isa<MemRefType>(transferReadOp.getSource().getType()))
+ if (!llvm::isa<MemRefType>(transferReadOp.getBase().getType()))
return rewriter.notifyMatchFailure(transferReadOp, "not a memref source");
// Out-of-bounds dims are not supported.
@@ -84,7 +84,7 @@ struct TransferReadToArmSMELowering
auto mask = transferReadOp.getMask();
auto padding = mask ? transferReadOp.getPadding() : nullptr;
rewriter.replaceOpWithNewOp<arm_sme::TileLoadOp>(
- transferReadOp, vectorType, transferReadOp.getSource(),
+ transferReadOp, vectorType, transferReadOp.getBase(),
transferReadOp.getIndices(), padding, mask, layout);
return success();
@@ -128,7 +128,7 @@ struct TransferWriteToArmSMELowering
if (!arm_sme::isValidSMETileVectorType(vType))
return failure();
- if (!llvm::isa<MemRefType>(writeOp.getSource().getType()))
+ if (!llvm::isa<MemRefType>(writeOp.getBase().getType()))
return failure();
// Out-of-bounds dims are not supported.
@@ -149,7 +149,7 @@ struct TransferWriteToArmSMELowering
: arm_sme::TileSliceLayout::Horizontal;
rewriter.replaceOpWithNewOp<arm_sme::TileStoreOp>(
- writeOp, writeOp.getVector(), writeOp.getSource(), writeOp.getIndices(),
+ writeOp, writeOp.getVector(), writeOp.getBase(), writeOp.getIndices(),
writeOp.getMask(), layout);
return success();
}
@@ -686,7 +686,7 @@ struct FoldTransferWriteOfExtractTileSlice
LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
PatternRewriter &rewriter) const final {
- if (!isa<MemRefType>(writeOp.getSource().getType()))
+ if (!isa<MemRefType>(writeOp.getBase().getType()))
return rewriter.notifyMatchFailure(writeOp, "destination not a memref");
if (writeOp.hasOutOfBoundsDim())
@@ -713,7 +713,7 @@ struct FoldTransferWriteOfExtractTileSlice
rewriter.replaceOpWithNewOp<arm_sme::StoreTileSliceOp>(
writeOp, extractTileSlice.getTile(),
- extractTileSlice.getTileSliceIndex(), mask, writeOp.getSource(),
+ extractTileSlice.getTileSliceIndex(), mask, writeOp.getBase(),
writeOp.getIndices(), extractTileSlice.getLayout());
return success();
}
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 5275c63..8b16da3 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -486,7 +486,7 @@ struct CombineTransferReadOpTranspose final
Value result =
rewriter
.create<vector::TransferReadOp>(
- loc, resultType, transferReadOp.getSource(),
+ loc, resultType, transferReadOp.getBase(),
transferReadOp.getIndices(), AffineMapAttr::get(newMap),
transferReadOp.getPadding(), transferReadOp.getMask(),
transferReadOp.getInBoundsAttr())
@@ -581,7 +581,7 @@ convertTransferReadOp(RewriterBase &rewriter, vector::TransferReadOp op,
gpu::MMAMatrixType type =
gpu::MMAMatrixType::get(op.getVectorType().getShape(), elType, fragType);
Value load = rewriter.create<gpu::SubgroupMmaLoadMatrixOp>(
- op.getLoc(), type, op.getSource(), op.getIndices(),
+ op.getLoc(), type, op.getBase(), op.getIndices(),
rewriter.getIndexAttr(*stride),
isTranspose ? rewriter.getUnitAttr() : UnitAttr());
valueMapping[mappingResult] = load;
@@ -612,7 +612,7 @@ convertTransferWriteOp(RewriterBase &rewriter, vector::TransferWriteOp op,
Value matrix = it->second;
auto store = rewriter.create<gpu::SubgroupMmaStoreMatrixOp>(
- op.getLoc(), matrix, op.getSource(), op.getIndices(),
+ op.getLoc(), matrix, op.getBase(), op.getIndices(),
rewriter.getIndexAttr(*stride), /*transpose=*/UnitAttr());
(void)store;
@@ -759,7 +759,7 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op,
indices);
nvgpu::LdMatrixOp newOp = rewriter.create<nvgpu::LdMatrixOp>(
- loc, vectorType, op.getSource(), indices, *transpose, params->numTiles);
+ loc, vectorType, op.getBase(), indices, *transpose, params->numTiles);
valueMapping[op] = newOp->getResult(0);
return success();
}
@@ -818,7 +818,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op,
rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
Value el = rewriter.create<vector::LoadOp>(loc, loadedElType,
- op.getSource(), newIndices);
+ op.getBase(), newIndices);
result = rewriter.create<vector::InsertOp>(loc, el, result, i);
}
} else {
@@ -841,7 +841,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op,
getXferIndices<vector::TransferReadOp>(
rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
Value el = rewriter.create<memref::LoadOp>(op.getLoc(), loadedElType,
- op.getSource(), newIndices);
+ op.getBase(), newIndices);
result = rewriter.create<vector::InsertOp>(
op.getLoc(), el, result, ArrayRef<int64_t>{i, innerIdx});
}
@@ -875,7 +875,7 @@ convertTransferReadToLoads(RewriterBase &rewriter, vector::TransferReadOp op,
return rewriter.notifyMatchFailure(op, "no warpMatrixInfo");
bool isLdMatrixCompatible =
- isSharedMemory(cast<MemRefType>(op.getSource().getType())) &&
+ isSharedMemory(cast<MemRefType>(op.getBase().getType())) &&
nvgpu::inferTileWidthInBits(*warpMatrixInfo) == 128;
VectorType vecTy = op.getVectorType();
@@ -933,7 +933,7 @@ convertTransferWriteToStores(RewriterBase &rewriter, vector::TransferWriteOp op,
SmallVector<Value, 4> newIndices;
getXferIndices<vector::TransferWriteOp>(
rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
- rewriter.create<vector::StoreOp>(loc, el, op.getSource(), newIndices);
+ rewriter.create<vector::StoreOp>(loc, el, op.getBase(), newIndices);
}
LLVM_DEBUG(DBGS() << "erase: " << op << "\n");
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 5296013..400003d 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -896,8 +896,9 @@ public:
} else if (kind == vector::CombiningKind::MAXNUMF) {
result = createFPReductionComparisonOpLowering<LLVM::vector_reduce_fmax>(
rewriter, loc, llvmType, operand, acc, fmf);
- } else
+ } else {
return failure();
+ }
rewriter.replaceOp(reductionOp, result);
return success();
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index b9b598c..cc56230 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -198,8 +198,7 @@ static Value generateInBoundsCheck(
Location loc = xferOp.getLoc();
ImplicitLocOpBuilder lb(xferOp.getLoc(), b);
if (!xferOp.isDimInBounds(0) && !isBroadcast) {
- Value memrefDim =
- vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim);
+ Value memrefDim = vector::createOrFoldDimOp(b, loc, xferOp.getBase(), *dim);
AffineExpr d0, d1;
bindDims(xferOp.getContext(), d0, d1);
Value base = xferOp.getIndices()[*dim];
@@ -426,7 +425,7 @@ struct Strategy<TransferReadOp> {
auto vecType = dyn_cast<VectorType>(bufferType.getElementType());
auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
auto newXferOp = b.create<vector::TransferReadOp>(
- loc, vecType, xferOp.getSource(), xferIndices,
+ loc, vecType, xferOp.getBase(), xferIndices,
AffineMapAttr::get(unpackedPermutationMap(b, xferOp)),
xferOp.getPadding(), Value(), inBoundsAttr);
@@ -512,7 +511,7 @@ struct Strategy<TransferWriteOp> {
Location loc = xferOp.getLoc();
auto vec = b.create<memref::LoadOp>(loc, buffer, loadIndices);
auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
- auto source = loopState.empty() ? xferOp.getSource() : loopState[0];
+ auto source = loopState.empty() ? xferOp.getBase() : loopState[0];
Type type = isTensorOp(xferOp) ? xferOp.getShapedType() : Type();
auto newXferOp = b.create<vector::TransferWriteOp>(
loc, type, vec, source, xferIndices,
@@ -544,7 +543,7 @@ struct Strategy<TransferWriteOp> {
/// Return the initial loop state for the generated scf.for loop.
static Value initialLoopState(TransferWriteOp xferOp) {
- return isTensorOp(xferOp) ? xferOp.getSource() : Value();
+ return isTensorOp(xferOp) ? xferOp.getBase() : Value();
}
};
@@ -1145,7 +1144,7 @@ struct ScalableTransposeTransferWriteConversion
ArrayRef<OpFoldResult>(*maskDims).drop_front());
}
- Value initDest = isTensorOp(writeOp) ? writeOp.getSource() : Value{};
+ Value initDest = isTensorOp(writeOp) ? writeOp.getBase() : Value{};
ValueRange initLoopArgs = initDest ? initDest : ValueRange{};
auto result = rewriter.create<scf::ForOp>(
loc, lb, ub, step, initLoopArgs,
@@ -1165,7 +1164,7 @@ struct ScalableTransposeTransferWriteConversion
// Create the transfer_write for the slice.
Value dest =
- loopIterArgs.empty() ? writeOp.getSource() : loopIterArgs.front();
+ loopIterArgs.empty() ? writeOp.getBase() : loopIterArgs.front();
auto newWriteOp = b.create<vector::TransferWriteOp>(
loc, sliceVec, dest, xferIndices,
ArrayRef<bool>(writeOp.getInBoundsValues()).drop_front());
@@ -1340,7 +1339,7 @@ struct UnrollTransferReadConversion
auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
auto newXferOp = b.create<vector::TransferReadOp>(
- loc, newXferVecType, xferOp.getSource(), xferIndices,
+ loc, newXferVecType, xferOp.getBase(), xferIndices,
AffineMapAttr::get(unpackedPermutationMap(b, xferOp)),
xferOp.getPadding(), Value(), inBoundsAttr);
maybeAssignMask(b, xferOp, newXferOp, i);
@@ -1449,7 +1448,7 @@ struct UnrollTransferWriteConversion
}
int64_t dimSize = inputVectorTy.getShape()[0];
- Value source = xferOp.getSource(); // memref or tensor to be written to.
+ Value source = xferOp.getBase(); // memref or tensor to be written to.
auto sourceType = isTensorOp(xferOp) ? xferOp.getShapedType() : Type();
// Generate fully unrolled loop of transfer ops.
@@ -1567,8 +1566,7 @@ struct Strategy1d<TransferReadOp> {
b, xferOp, iv, dim, TypeRange(xferOp.getVectorType()),
/*inBoundsCase=*/
[&](OpBuilder &b, Location loc) {
- Value val =
- b.create<memref::LoadOp>(loc, xferOp.getSource(), indices);
+ Value val = b.create<memref::LoadOp>(loc, xferOp.getBase(), indices);
return b.create<vector::InsertElementOp>(loc, val, vec, iv);
},
/*outOfBoundsCase=*/
@@ -1599,7 +1597,7 @@ struct Strategy1d<TransferWriteOp> {
/*inBoundsCase=*/[&](OpBuilder &b, Location loc) {
auto val =
b.create<vector::ExtractElementOp>(loc, xferOp.getVector(), iv);
- b.create<memref::StoreOp>(loc, val, xferOp.getSource(), indices);
+ b.create<memref::StoreOp>(loc, val, xferOp.getBase(), indices);
});
b.create<scf::YieldOp>(loc);
}
diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
index 0bc0f2f..adcee19 100644
--- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
+++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
@@ -192,7 +192,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
xegpu::CreateNdDescOp ndDesc =
createNdDescriptor(rewriter, loc, descType,
- dyn_cast<TypedValue<MemRefType>>(readOp.getSource()),
+ dyn_cast<TypedValue<MemRefType>>(readOp.getBase()),
readOp.getIndices());
DenseI64ArrayAttr transposeAttr =
@@ -231,10 +231,10 @@ struct TransferWriteLowering
vecTy.getShape(), vecTy.getElementType(),
/*array_length=*/1, /*boundary_check=*/writeOp.hasOutOfBoundsDim(),
xegpu::MemorySpace::Global);
- xegpu::CreateNdDescOp ndDesc = createNdDescriptor(
- rewriter, loc, descType,
- dyn_cast<TypedValue<MemRefType>>(writeOp.getSource()),
- writeOp.getIndices());
+ xegpu::CreateNdDescOp ndDesc =
+ createNdDescriptor(rewriter, loc, descType,
+ dyn_cast<TypedValue<MemRefType>>(writeOp.getBase()),
+ writeOp.getIndices());
// By default, no specific caching policy is assigned.
xegpu::CachePolicyAttr hint = nullptr;
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
index a2ac41c..4fe22ea 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
@@ -118,7 +118,7 @@ static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc,
Value fill = builder.create<vector::SplatOp>(loc, unbroadcastedVectorType,
readOp.getPadding());
Value load = builder.create<vector::LoadOp>(
- loc, unbroadcastedVectorType, readOp.getSource(), readOp.getIndices());
+ loc, unbroadcastedVectorType, readOp.getBase(), readOp.getIndices());
Value res = builder.create<arith::SelectOp>(loc, unbroadcastedVectorType,
readOp.getMask(), load, fill);
// Insert a broadcasting op if required.
@@ -149,7 +149,7 @@ struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
}
Location loc = readOp.getLoc();
- Value src = readOp.getSource();
+ Value src = readOp.getBase();
VectorType vectorType = readOp.getVectorType();
int64_t vectorSize = vectorType.getNumElements();
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index 411e791..fe53d03 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -233,8 +233,9 @@ std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
std::min(*tripCount, static_cast<uint64_t>(constExpr.getValue()));
else
tripCount = constExpr.getValue();
- } else
+ } else {
return std::nullopt;
+ }
}
return tripCount;
}
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
index 62a148d..9596587 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
@@ -315,7 +315,7 @@ struct LegalizeTransferReadOpsByDecomposition
decomposeToSMETiles(rewriter, vectorType, smeTileType, transposed)) {
auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
auto smeRead = rewriter.create<vector::TransferReadOp>(
- loc, smeTileType, readOp.getSource(),
+ loc, smeTileType, readOp.getBase(),
getSMESubTileIndices(rewriter, loc, readOp.getIndices(), smeTile),
readOp.getPermutationMapAttr(), readOp.getPadding(), smeMask,
readOp.getInBoundsAttr());
@@ -359,7 +359,7 @@ struct LegalizeTransferWriteOpsByDecomposition
auto smeTileType = getSMETileTypeForElement(vectorType.getElementType());
auto inputSMETiles = adaptor.getValueToStore();
- Value destTensorOrMemref = writeOp.getSource();
+ Value destTensorOrMemref = writeOp.getBase();
for (auto [index, smeTile] : llvm::enumerate(decomposeToSMETiles(
rewriter, vectorType, smeTileType, transposed))) {
auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
@@ -497,7 +497,7 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop
auto slice =
rewriter.create<vector::ExtractOp>(loc, tile, tileSliceIndex);
rewriter.create<vector::TransferWriteOp>(
- loc, slice, writeOp.getSource(), ValueRange{storeRow, storeCol},
+ loc, slice, writeOp.getBase(), ValueRange{storeRow, storeCol},
AffineMapAttr::get(writeOp.getPermutationMap().dropResult(0)),
sliceMask,
rewriter.getBoolArrayAttr(
@@ -677,7 +677,7 @@ struct LiftIllegalVectorTransposeToMemory
});
SmallVector<Value> strides(readType.getRank(), Value(one));
auto readSubview = rewriter.create<memref::SubViewOp>(
- loc, illegalRead.getSource(), illegalRead.getIndices(), readSizes,
+ loc, illegalRead.getBase(), illegalRead.getIndices(), readSizes,
strides);
// Apply the transpose to all values/attributes of the transfer_read:
@@ -851,7 +851,7 @@ struct LowerIllegalTransposeStoreViaZA
// Note: We need to use `get_tile` as there's no vector-level `undef`.
Value undefTile = rewriter.create<arm_sme::GetTileOp>(loc, smeTileType);
- Value destTensorOrMemref = writeOp.getSource();
+ Value destTensorOrMemref = writeOp.getBase();
auto numSlicesPerTile =
std::min(sourceType.getDimSize(0), smeTileType.getDimSize(0));
auto numSlices =
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index fc74e3d..c757f3c 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -3727,36 +3727,6 @@ void CallIntrinsicOp::print(OpAsmPrinter &p) {
}
//===----------------------------------------------------------------------===//
-// OpAsmDialectInterface
-//===----------------------------------------------------------------------===//
-
-namespace {
-struct LLVMOpAsmDialectInterface : public OpAsmDialectInterface {
- using OpAsmDialectInterface::OpAsmDialectInterface;
-
- AliasResult getAlias(Attribute attr, raw_ostream &os) const override {
- return TypeSwitch<Attribute, AliasResult>(attr)
- .Case<AccessGroupAttr, AliasScopeAttr, AliasScopeDomainAttr,
- DIBasicTypeAttr, DICommonBlockAttr, DICompileUnitAttr,
- DICompositeTypeAttr, DIDerivedTypeAttr, DIFileAttr,
- DIGlobalVariableAttr, DIGlobalVariableExpressionAttr,
- DIImportedEntityAttr, DILabelAttr, DILexicalBlockAttr,
- DILexicalBlockFileAttr, DILocalVariableAttr, DIModuleAttr,
- DINamespaceAttr, DINullTypeAttr, DIStringTypeAttr,
- DISubprogramAttr, DISubroutineTypeAttr, LoopAnnotationAttr,
- LoopVectorizeAttr, LoopInterleaveAttr, LoopUnrollAttr,
- LoopUnrollAndJamAttr, LoopLICMAttr, LoopDistributeAttr,
- LoopPipelineAttr, LoopPeeledAttr, LoopUnswitchAttr, TBAARootAttr,
- TBAATagAttr, TBAATypeDescriptorAttr>([&](auto attr) {
- os << decltype(attr)::getMnemonic();
- return AliasResult::OverridableAlias;
- })
- .Default([](Attribute) { return AliasResult::NoAlias; });
- }
-};
-} // namespace
-
-//===----------------------------------------------------------------------===//
// LinkerOptionsOp
//===----------------------------------------------------------------------===//
@@ -4024,9 +3994,6 @@ void LLVMDialect::initialize() {
// Support unknown operations because not all LLVM operations are registered.
allowUnknownOperations();
- // clang-format off
- addInterfaces<LLVMOpAsmDialectInterface>();
- // clang-format on
declarePromisedInterface<DialectInlinerInterface, LLVMDialect>();
}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 3c3731a..1ea3f96 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -33,6 +33,7 @@
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
@@ -1203,6 +1204,13 @@ LogicalResult NVVM::VoteSyncOp::verify() {
return success();
}
+llvm::Value *
+NVVM::DotAccumulate4WayOp::getPackedArg(llvm::Value *arg,
+ llvm::IRBuilderBase &builder) {
+ return builder.CreateBitCast(arg,
+ llvm::Type::getInt32Ty(builder.getContext()));
+}
+
//===----------------------------------------------------------------------===//
// getIntrinsicID/getIntrinsicIDAndArgs methods
//===----------------------------------------------------------------------===//
@@ -1590,6 +1598,26 @@ static void nvvmInferResultRanges(Operation *op, Value result,
}
}
+llvm::Intrinsic::ID
+DotAccumulate4WayOp::getIntrinsicID(NVVM::DotAccumulate4WayType a_type,
+ NVVM::DotAccumulate4WayType b_type) {
+ bool is_a_siext = a_type == NVVM::DotAccumulate4WayType::S8;
+ bool is_b_siext = b_type == NVVM::DotAccumulate4WayType::S8;
+ unsigned type = (is_a_siext << 1) | is_b_siext;
+ switch (type) {
+ case 0:
+ return llvm::Intrinsic::nvvm_idp4a_u_u;
+ case 1:
+ return llvm::Intrinsic::nvvm_idp4a_u_s;
+ case 2:
+ return llvm::Intrinsic::nvvm_idp4a_s_u;
+ case 3:
+ return llvm::Intrinsic::nvvm_idp4a_s_s;
+ default:
+ llvm_unreachable("Invalid DP4a type");
+ }
+}
+
//===----------------------------------------------------------------------===//
// NVVMDialect initialization, type parsing, and registration.
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index fce0751..96106cf 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -220,6 +220,23 @@ static void buildBatchMatmulOp(OpBuilder &b, OperationState &state,
attributes, regionBuilder);
}
+static void buildBatchReduceMatmulOp(OpBuilder &b, OperationState &state,
+ std::optional<TypeRange> resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes,
+ RegionBuilderFn regionBuilder,
+ ArrayRef<AffineMap> indexingMaps) {
+ // Initialize indexingMaps attribute, for BatchReduceMatmulOp.
+ SmallVector<Attribute, 4> indexingMapsAttrVal;
+ indexingMapsAttrVal =
+ llvm::map_to_vector(indexingMaps, [](AffineMap map) -> Attribute {
+ return AffineMapAttr::get(map);
+ });
+ state.addAttribute("indexing_maps", b.getArrayAttr(indexingMapsAttrVal));
+ return buildStructuredOp(b, state, resultTensorTypes, inputs, outputs,
+ attributes, regionBuilder);
+}
+
/// Common parsing used for both named structured ops created by ods-gen and by
/// manually defined C++ ops. Does not handle regions.
static ParseResult
@@ -3484,19 +3501,24 @@ static LogicalResult verifyExtendedMatmulSemantic(MatmulOp matmulOp,
return success();
}
-// Check general validity of input indexing map.
-static LogicalResult verifyInputMaps(BatchMatmulOp batchMatmulOp,
+// Check general validity of input indexing map of
+// BatchMatmulOp/BatchReduceMatmulOp.
+template <typename OpTy>
+static LogicalResult verifyInputMaps(OpTy batchVariantMatmulOp,
AffineMap opIndexingMap,
AffineMap defaultIndexingMap, bool isLHS) {
+ assert((isa<BatchMatmulOp>(batchVariantMatmulOp) ||
+ isa<BatchReduceMatmulOp>(batchVariantMatmulOp)) &&
+ "Expected BatchMatmulOp or BatchReduceMatmulOp");
// Check the result dims are valid.
if (!areResultExprsSubsetOf(opIndexingMap, defaultIndexingMap))
- return batchMatmulOp->emitOpError()
+ return batchVariantMatmulOp->emitOpError()
<< "Unexpected result dim expression (outside the set of default "
"result dims).";
// Check for valid number of result dims of input maps.
if (opIndexingMap.getNumResults() > 3)
- return batchMatmulOp->emitOpError()
+ return batchVariantMatmulOp->emitOpError()
<< "no. of result dim expressions exceeds 3.";
auto hasValidBatchDim = [](AffineMap map) {
@@ -3506,60 +3528,83 @@ static LogicalResult verifyInputMaps(BatchMatmulOp batchMatmulOp,
// Check if the requested broadcast is valid.
if (isBroadcasted(opIndexingMap, defaultIndexingMap)) {
- if (!batchMatmulOp.isValidLhsRhsBroadcastMap(opIndexingMap, isLHS))
- return batchMatmulOp->emitOpError() << "Invalid broadcast requested.";
+ if (!batchVariantMatmulOp.isValidLhsRhsBroadcastMap(opIndexingMap, isLHS))
+ return batchVariantMatmulOp->emitOpError()
+ << "Invalid broadcast requested.";
} else if (!hasValidBatchDim(opIndexingMap)) {
- return batchMatmulOp->emitOpError()
+ return batchVariantMatmulOp->emitOpError()
<< "Invalid batch dimension expression.";
}
return success();
}
/// This function checks if the given AffineMap for the output of a
-/// BatchMatmulOp has exactly 3 result dimensions and if the output map result
-/// dimensions are valid.
-static LogicalResult verifyOutputMap(BatchMatmulOp batchMatmulOp,
+/// BatchMatmulOp/BatchReduceMatmulOp has exactly the desired number of result
+/// dimensions and if the output map result dimensions are valid.
+template <typename OpTy>
+static LogicalResult verifyOutputMap(OpTy batchVariantMatmulOp,
AffineMap opIndexingMap) {
- if (opIndexingMap.getNumResults() != 3)
- return batchMatmulOp->emitOpError()
+ assert((isa<BatchMatmulOp>(batchVariantMatmulOp) ||
+ isa<BatchReduceMatmulOp>(batchVariantMatmulOp)) &&
+ "Expected BatchMatmulOp or BatchReduceMatmulOp");
+ if (isa<BatchMatmulOp>(batchVariantMatmulOp) &&
+ opIndexingMap.getNumResults() != 3) {
+
+ return batchVariantMatmulOp->emitOpError()
<< "expects 3 dims, but got (" << opIndexingMap.getNumResults()
<< ").";
+ }
+ if (isa<BatchReduceMatmulOp>(batchVariantMatmulOp) &&
+ opIndexingMap.getNumResults() != 2) {
+ return batchVariantMatmulOp->emitOpError()
+ << "expects 2 dims, but got (" << opIndexingMap.getNumResults()
+ << ").";
+ }
- auto areValidOutputResultDim = [](AffineMap outputMap) {
- return outputMap.getResult(0).isFunctionOfDim(0) &&
- outputMap.getResult(1).isFunctionOfDim(1) &&
- outputMap.getResult(2).isFunctionOfDim(2);
+ auto areValidOutputResultDim = [&](AffineMap outputMap) {
+ return isa<BatchMatmulOp>(batchVariantMatmulOp)
+ ? outputMap.getResult(0).isFunctionOfDim(0) &&
+ outputMap.getResult(1).isFunctionOfDim(1) &&
+ outputMap.getResult(2).isFunctionOfDim(2)
+ : outputMap.getResult(0).isFunctionOfDim(1) &&
+ outputMap.getResult(1).isFunctionOfDim(2);
};
- if (!areValidOutputResultDim(opIndexingMap))
- return batchMatmulOp->emitOpError()
+ if (!areValidOutputResultDim(opIndexingMap)) {
+ return batchVariantMatmulOp->emitOpError()
<< "Invalid output map result dimension.";
+ }
return success();
}
/// Verifies the broadcast and transpose semantic specified by the explicit
-/// indexing map for the BatchMatmulOp op for each operand specified by opIndex.
+/// indexing map for the BatchMatmulOp/BatchReduceMatmulOp op for each operand
+/// specified by opIndex.
+template <typename OpTy>
static LogicalResult
-verifyExtendedBatchMatmulSemantic(BatchMatmulOp batchMatmulOp,
- unsigned opIndex) {
+verifyExtendedBatchVariantMatmulSemantic(OpTy batchVariantMatmulOp,
+ unsigned opIndex) {
SmallVector<AffineMap, 3> opIndexingMaps =
- batchMatmulOp.getIndexingMapsArray();
+ batchVariantMatmulOp.getIndexingMapsArray();
SmallVector<AffineMap, 3> defaultIndexingMaps =
- batchMatmulOp.getDefaultIndexingMaps(batchMatmulOp->getContext());
+ batchVariantMatmulOp.getDefaultIndexingMaps(
+ batchVariantMatmulOp->getContext());
if (opIndexingMaps.size() != 3)
- return batchMatmulOp->emitOpError()
+ return batchVariantMatmulOp->emitOpError()
<< "Indexing_map attribute must have 3 affine maps.";
auto opIndexingMap = opIndexingMaps[opIndex];
auto defaultIndexingMap = defaultIndexingMaps[opIndex];
- if (opIndex == 2 && failed(verifyOutputMap(batchMatmulOp, opIndexingMap)))
+ if (opIndex == 2 &&
+ failed(verifyOutputMap(batchVariantMatmulOp, opIndexingMap)))
return failure();
- if (failed(verifyInputMaps(batchMatmulOp, opIndexingMap, defaultIndexingMap,
- opIndex == 0)))
+ if (opIndex != 2 &&
+ failed(verifyInputMaps(batchVariantMatmulOp, opIndexingMap,
+ defaultIndexingMap, opIndex == 0)))
return failure();
return success();
@@ -3635,12 +3680,18 @@ void MatmulOp::regionBuilder(ImplicitLocOpBuilder &b, Block &block,
helper.yieldOutputs(yields);
}
-/// Returns true if the given broadcast map \p bcastMap is valid for this op.
+/// Returns true if the given bcastMap map is a valid broadcast map. A valid
+/// broadcast map must include K dimension.
+/// TODO: Strict inclusion of K dimension in the broadcast map is not
+/// necessary for both input matrices simultaneously. We can relax this
+/// condition to have K dimension for one input matrix map and infer the K
+/// dimension for other input matrix map from the one already having K
+/// dimension.
bool MatmulOp::isValidLhsRhsBroadcastMap(AffineMap bcastMap) {
assert(bcastMap.getNumResults() == 1 && "Expected single result dim expr.");
- AffineExpr exp = bcastMap.getResult(0);
+ AffineExpr expr = bcastMap.getResult(0);
// Invalid map if the common dimension of matmul not found.
- return exp.isFunctionOfDim(bcastMap.getNumDims() - 1);
+ return expr.isFunctionOfDim(bcastMap.getNumDims() - 1);
}
FailureOr<ArrayAttr> parseIndexingMapsAttr(OpAsmParser &parser) {
@@ -3938,21 +3989,31 @@ bool BatchMatmulOp::hasUserDefinedMaps() {
return defaultMaps != explicitMaps;
}
-/// Returns true if the given broadcast map bcastMap is valid for this op.
+/// Returns true if the given bcastMap map is a valid broadcast map. A valid
+/// broadcast map must include K dimension.
+/// TODO: Strict inclusion of K dimension in the broadcast map is not
+/// necessary for both input matrices simultaneously. We can relax this
+/// condition to have K dimension for one input matrix map and infer the K
+/// dimension for other input matrix map from the one already having K
+/// dimension.
bool BatchMatmulOp::isValidLhsRhsBroadcastMap(AffineMap bcastMap, bool isLHS) {
assert(bcastMap.getNumResults() < 3 &&
"Expected less than 3 result dim expr.");
bool isValid = false;
enum Indices { batchPos, mPos, nPos, kPos };
if (bcastMap.getNumResults() == 1) {
- AffineExpr exp = bcastMap.getResult(0);
- isValid = exp.isFunctionOfDim(kPos);
+ AffineExpr expr = bcastMap.getResult(0);
+ isValid = expr.isFunctionOfDim(kPos);
} else if (bcastMap.getNumResults() == 2) {
- AffineExpr exp0 = bcastMap.getResult(0);
- AffineExpr exp1 = bcastMap.getResult(1);
- isValid = isLHS
- ? (exp0.isFunctionOfDim(mPos) && exp1.isFunctionOfDim(kPos))
- : (exp0.isFunctionOfDim(kPos) && exp1.isFunctionOfDim(nPos));
+ AffineExpr expr0 = bcastMap.getResult(0);
+ AffineExpr expr1 = bcastMap.getResult(1);
+ isValid =
+ isLHS ? ((expr0.isFunctionOfDim(batchPos) ||
+ expr0.isFunctionOfDim(mPos)) &&
+ expr1.isFunctionOfDim(kPos))
+ : ((expr0.isFunctionOfDim(batchPos) &&
+ expr1.isFunctionOfDim(kPos)) ||
+ (expr0.isFunctionOfDim(kPos) && expr1.isFunctionOfDim(nPos)));
}
return isValid;
}
@@ -4044,7 +4105,7 @@ LogicalResult BatchMatmulOp::verify() {
return success();
for (unsigned opIndex = 0; opIndex < 3; opIndex++) {
- if (failed(verifyExtendedBatchMatmulSemantic(*this, opIndex)))
+ if (failed(verifyExtendedBatchVariantMatmulSemantic(*this, opIndex)))
return failure();
}
return success();
@@ -4266,8 +4327,9 @@ void ElementwiseOp::regionBuilder(ImplicitLocOpBuilder &b, Block &block,
result = helper.buildTernaryFn(kind.ternaryFn, block.getArgument(0),
block.getArgument(1), block.getArgument(2));
- } else
+ } else {
assert(false && "found unhandled category in elemwise");
+ }
yields.push_back(result);
helper.yieldOutputs(yields);
@@ -5365,6 +5427,176 @@ struct FoldTensorCastUnPackOp : public OpRewritePattern<UnPackOp> {
}
};
+//===----------------------------------------------------------------------===//
+// BatchReduceMatmulOp
+//===----------------------------------------------------------------------===//
+SmallVector<utils::IteratorType> BatchReduceMatmulOp::getIteratorTypesArray() {
+ return SmallVector<utils::IteratorType>{
+ utils::IteratorType::reduction, utils::IteratorType::parallel,
+ utils::IteratorType::parallel, utils::IteratorType::reduction};
+}
+
+SmallVector<AffineMap>
+BatchReduceMatmulOp::getDefaultIndexingMaps(MLIRContext *context) {
+ AffineExpr d0, d1, d2, d3;
+ SmallVector<AffineMap> indexingMaps;
+ bindDims(context, d0, d1, d2, d3);
+ indexingMaps.push_back(AffineMap::get(4, 0, {d0, d1, d3}, context));
+ indexingMaps.push_back(AffineMap::get(4, 0, {d0, d3, d2}, context));
+ indexingMaps.push_back(AffineMap::get(4, 0, {d1, d2}, context));
+ return indexingMaps;
+}
+
+unsigned BatchReduceMatmulOp::getNumRegionArgs() { return 3; }
+
+std::string BatchReduceMatmulOp::getLibraryCallName() {
+ return generateLibraryCallName(getOperation());
+}
+
+/// Check if the op has broadcast and/or transpose semantic. Returns true if
+/// the user defined indexing maps are not equal to default map.
+bool BatchReduceMatmulOp::hasUserDefinedMaps() {
+ SmallVector<AffineMap, 3> defaultMaps =
+ getDefaultIndexingMaps(this->getContext());
+ SmallVector<AffineMap, 3> explicitMaps = getIndexingMapsArray();
+ return defaultMaps != explicitMaps;
+}
+
+/// Returns true if the given bcastMap map is a valid broadcast map. A valid
+/// broadcast map must include K dimension.
+/// TODO: Strict inclusion of K dimension in the broadcast map is not
+/// necessary for both input matrices simultaneously. We can relax this
+/// condition to have K dimension for one input matrix map and infer the K
+/// dimension for other input matrix map from the one already having K
+/// dimension.
+bool BatchReduceMatmulOp::isValidLhsRhsBroadcastMap(AffineMap bcastMap,
+ bool isLHS) {
+ assert(bcastMap.getNumResults() < 3 &&
+ "Expected less than 3 result dim expr.");
+ bool isValid = false;
+ enum Indices { batchPos, mPos, nPos, kPos };
+ if (bcastMap.getNumResults() == 1) {
+ AffineExpr expr = bcastMap.getResult(0);
+ isValid = expr.isFunctionOfDim(kPos);
+ } else if (bcastMap.getNumResults() == 2) {
+ AffineExpr expr0 = bcastMap.getResult(0);
+ AffineExpr expr1 = bcastMap.getResult(1);
+ isValid =
+ isLHS ? ((expr0.isFunctionOfDim(batchPos) ||
+ expr0.isFunctionOfDim(mPos)) &&
+ expr1.isFunctionOfDim(kPos))
+ : ((expr0.isFunctionOfDim(batchPos) &&
+ expr1.isFunctionOfDim(kPos)) ||
+ (expr0.isFunctionOfDim(kPos) && expr1.isFunctionOfDim(nPos)));
+ }
+ return isValid;
+}
+
+void BatchReduceMatmulOp::regionBuilder(ImplicitLocOpBuilder &b, Block &block,
+ ArrayRef<NamedAttribute> attrs) {
+ assert(block.getNumArguments() == 3 &&
+ "BatchReduceMatmulOp regionBuilder expects 3 (>=0) args");
+ RegionBuilderHelper helper(b, block);
+ SmallVector<Value> yields;
+
+ auto toType = block.getArgument(2).getType();
+ Value castValA =
+ helper.buildTypeFn(TypeFn::cast_signed, toType, block.getArgument(0));
+ Value castValB =
+ helper.buildTypeFn(TypeFn::cast_signed, toType, block.getArgument(1));
+ Value mulVal = helper.buildBinaryFn(BinaryFn::mul, castValA, castValB);
+ Value addVal =
+ helper.buildBinaryFn(BinaryFn::add, block.getArgument(2), mulVal);
+ yields.push_back(addVal);
+ helper.yieldOutputs(yields);
+}
+
+ParseResult BatchReduceMatmulOp::parse(OpAsmParser &parser,
+ OperationState &result) {
+ SmallVector<Attribute, 3> indexingMapsAttr;
+ Attribute mapAttr;
+ if (succeeded(parser.parseOptionalKeyword("indexing_maps"))) {
+ if (parser.parseEqual())
+ return failure();
+ if (parser.parseLSquare())
+ return failure();
+
+ do {
+ if (parser.parseAttribute(mapAttr))
+ return failure();
+ if (!isa<AffineMapAttr>(mapAttr)) {
+ return parser.emitError(parser.getCurrentLocation(),
+ "expected affine map attribute");
+ }
+ indexingMapsAttr.push_back(mapAttr);
+
+ if (parser.parseOptionalComma())
+ break;
+ } while (true);
+
+ if (parser.parseRSquare())
+ return failure();
+ }
+ // Initialize indexingMaps, if not supplied explicitly.
+ if (indexingMapsAttr.empty()) {
+ indexingMapsAttr = llvm::map_to_vector(
+ BatchReduceMatmulOp::getDefaultIndexingMaps(parser.getContext()),
+ [](AffineMap map) -> Attribute { return AffineMapAttr::get(map); });
+ }
+ result.addAttribute("indexing_maps",
+ parser.getBuilder().getArrayAttr(indexingMapsAttr));
+ return ::parseNamedStructuredOp(parser, result,
+ BatchReduceMatmulOp::getNumRegionArgs(),
+ BatchReduceMatmulOp::getRegionBuilder());
+}
+
+void BatchReduceMatmulOp::print(OpAsmPrinter &p) {
+ SmallVector<Attribute, 3> indexingMaps = llvm::map_to_vector(
+ BatchReduceMatmulOp::getDefaultIndexingMaps(getContext()),
+ [](AffineMap map) -> Attribute { return AffineMapAttr::get(map); });
+
+ if (!llvm::equal(getIndexingMaps(), indexingMaps)) {
+ p << " indexing_maps = [";
+ llvm::interleaveComma(getIndexingMaps(), p,
+ [&](Attribute attr) { p.printAttribute(attr); });
+ p << "]";
+ }
+
+ SmallVector<StringRef, 3> elidedAttrs = {
+ "operandSegmentSizes", "linalg.memoized_indexing_maps", "indexing_maps"};
+ ::printNamedStructuredOp(p, getOperation(), getInputs(), getOutputs(),
+ elidedAttrs);
+}
+
+/// Verify the user defined indexing maps.
+LogicalResult BatchReduceMatmulOp::verify() {
+ // Verification of pure batch_reduce_matmul is handled by
+ // verifyStructuredOpInterface().
+ if (!hasUserDefinedMaps())
+ return success();
+
+ for (unsigned opIndex = 0; opIndex < 3; opIndex++) {
+ if (failed(verifyExtendedBatchVariantMatmulSemantic(*this, opIndex)))
+ return failure();
+ }
+ return success();
+}
+LogicalResult BatchReduceMatmulOp::fold(FoldAdaptor,
+ SmallVectorImpl<OpFoldResult> &) {
+ return memref::foldMemRefCast(*this);
+}
+void BatchReduceMatmulOp::getEffects(
+ SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
+ &effects) {
+ if (hasPureTensorSemantics())
+ return;
+ getGenericEffectsImpl(effects, cast<LinalgOp>(getOperation()));
+}
+
+Speculation::Speculatability BatchReduceMatmulOp::getSpeculatability() {
+ return getGenericSpeculatabilityImpl(cast<LinalgOp>(getOperation()));
+}
+
} // namespace linalg
} // namespace mlir
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
index 20e4e3c..707b63f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -171,7 +171,7 @@ void mlir::linalg::hoistRedundantVectorBroadcasts(RewriterBase &rewriter,
static bool noAliasingUseInLoop(vector::TransferReadOp transferRead,
LoopLikeOpInterface loop) {
- Value source = transferRead.getSource();
+ Value source = transferRead.getBase();
// Skip view-like Ops and retrive the actual soruce Operation
while (auto srcOp =
@@ -276,7 +276,7 @@ void mlir::linalg::hoistRedundantVectorTransfers(Operation *root,
for (auto *sliceOp : llvm::reverse(forwardSlice)) {
auto candidateWrite = dyn_cast<vector::TransferWriteOp>(sliceOp);
if (!candidateWrite ||
- candidateWrite.getSource() != transferRead.getSource())
+ candidateWrite.getBase() != transferRead.getBase())
continue;
transferWrite = candidateWrite;
}
@@ -312,11 +312,11 @@ void mlir::linalg::hoistRedundantVectorTransfers(Operation *root,
transferRead.getPermutationMap() != transferWrite.getPermutationMap())
return WalkResult::advance();
- auto *source = transferRead.getSource().getDefiningOp();
+ auto *source = transferRead.getBase().getDefiningOp();
if (source && isa_and_nonnull<ViewLikeOpInterface>(source))
return WalkResult::advance();
- source = transferWrite.getSource().getDefiningOp();
+ source = transferWrite.getBase().getDefiningOp();
if (source && isa_and_nonnull<ViewLikeOpInterface>(source))
return WalkResult::advance();
@@ -325,7 +325,7 @@ void mlir::linalg::hoistRedundantVectorTransfers(Operation *root,
DominanceInfo dom(loop);
if (!dom.properlyDominates(transferRead.getOperation(), transferWrite))
return WalkResult::advance();
- for (auto &use : transferRead.getSource().getUses()) {
+ for (auto &use : transferRead.getBase().getUses()) {
if (!loop->isAncestor(use.getOwner()))
continue;
if (use.getOwner() == transferRead.getOperation() ||
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index a477c2f..63f88d0 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -2627,7 +2627,7 @@ struct PadOpVectorizationWithTransferReadPattern
SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
xferOp->setAttr(xferOp.getInBoundsAttrName(),
rewriter.getBoolArrayAttr(inBounds));
- xferOp.getSourceMutable().assign(padOp.getSource());
+ xferOp.getBaseMutable().assign(padOp.getSource());
xferOp.getPaddingMutable().assign(padValue);
});
@@ -3114,7 +3114,7 @@ LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
return rewriter.notifyMatchFailure(xferOp, "unsupported mask");
// Transfer into `view`.
- Value viewOrAlloc = xferOp.getSource();
+ Value viewOrAlloc = xferOp.getBase();
if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
!viewOrAlloc.getDefiningOp<memref::AllocOp>())
return rewriter.notifyMatchFailure(xferOp, "source not a view or alloc");
@@ -3191,7 +3191,7 @@ LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
return rewriter.notifyMatchFailure(xferOp, "unsupported mask");
// Transfer into `viewOrAlloc`.
- Value viewOrAlloc = xferOp.getSource();
+ Value viewOrAlloc = xferOp.getBase();
if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
!viewOrAlloc.getDefiningOp<memref::AllocOp>())
return rewriter.notifyMatchFailure(xferOp, "source not a view or alloc");
diff --git a/mlir/lib/Dialect/MLProgram/IR/MLProgramDialect.cpp b/mlir/lib/Dialect/MLProgram/IR/MLProgramDialect.cpp
index bda1032..0b15612 100644
--- a/mlir/lib/Dialect/MLProgram/IR/MLProgramDialect.cpp
+++ b/mlir/lib/Dialect/MLProgram/IR/MLProgramDialect.cpp
@@ -39,14 +39,6 @@ struct MLProgramInlinerInterface : public DialectInlinerInterface {
struct MLProgramOpAsmDialectInterface : public OpAsmDialectInterface {
using OpAsmDialectInterface::OpAsmDialectInterface;
-
- AliasResult getAlias(Attribute attr, raw_ostream &os) const override {
- if (llvm::isa<ExternAttr>(attr)) {
- os << "extern";
- return AliasResult::OverridableAlias;
- }
- return AliasResult::NoAlias;
- }
};
} // namespace
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
index 05ba6a3..b906c72 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
@@ -119,7 +119,7 @@ static nvgpu::LdMatrixOp rebuildLdMatrixOp(RewriterBase &rewriter,
template <typename TransferLikeOp>
static FailureOr<Value>
getTransferLikeOpSrcMemRef(TransferLikeOp transferLikeOp) {
- Value src = transferLikeOp.getSource();
+ Value src = transferLikeOp.getBase();
if (isa<MemRefType>(src.getType()))
return src;
return failure();
diff --git a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
index e4fb3f9..f17f7a0 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
@@ -224,7 +224,7 @@ static Value getMemRefOperand(LoadOrStoreOpTy op) {
}
static Value getMemRefOperand(vector::TransferReadOp op) {
- return op.getSource();
+ return op.getBase();
}
static Value getMemRefOperand(nvgpu::LdMatrixOp op) {
@@ -240,7 +240,7 @@ static Value getMemRefOperand(vector::MaskedLoadOp op) { return op.getBase(); }
static Value getMemRefOperand(vector::MaskedStoreOp op) { return op.getBase(); }
static Value getMemRefOperand(vector::TransferWriteOp op) {
- return op.getSource();
+ return op.getBase();
}
static Value getMemRefOperand(gpu::SubgroupMmaLoadMatrixOp op) {
diff --git a/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp b/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
index 80f4c5c..dca2b1a 100644
--- a/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
+++ b/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
@@ -57,8 +57,9 @@ checkOperandAffineExprRecursively(AffineExpr expr,
} else if (rhs.getKind() == AffineExprKind::DimId &&
lhs.getKind() == AffineExprKind::Constant) {
dimExpr = rhs;
- } else
+ } else {
return failure();
+ }
unsigned position = cast<AffineDimExpr>(dimExpr).getPosition();
if ((size_t)position >= seenIds.size() || seenIds[position])
return failure();
diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
index 556922a..75dbe0b 100644
--- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
@@ -172,7 +172,7 @@ static Value getValueLoadedFromGlobal(Operation *op) {
if (!load)
return nullptr;
- auto loadType = dyn_cast<MemRefType>(load.getSource().getType());
+ auto loadType = dyn_cast<MemRefType>(load.getBase().getType());
if (!loadType || !hasDefaultMemorySpace(loadType))
return nullptr;
return load;
@@ -185,7 +185,7 @@ static bool isStoreToShared(Operation *op, Value v) {
if (!store || store.getVector() != v)
return false;
- auto storeType = dyn_cast<MemRefType>(store.getSource().getType());
+ auto storeType = dyn_cast<MemRefType>(store.getBase().getType());
return storeType || hasSharedMemorySpace(storeType);
}
diff --git a/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp b/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp
index a782ed5..5904e42 100644
--- a/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp
+++ b/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp
@@ -71,9 +71,9 @@ Value nvgpu::getMemrefOperand(Operation *op) {
if (auto storeOp = dyn_cast<memref::StoreOp>(op))
return storeOp.getMemref();
if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
- return transferWrite.getSource();
+ return transferWrite.getBase();
if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
- return transferRead.getSource();
+ return transferRead.getBase();
if (auto storeOp = dyn_cast<vector::StoreOp>(op))
return storeOp.getBase();
if (auto loadOp = dyn_cast<vector::LoadOp>(op))
diff --git a/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp b/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp
index e80360a..c5a9938 100644
--- a/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp
+++ b/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp
@@ -285,7 +285,7 @@ bool nvgpu::canLowerToWarpMatrixOperation(vector::TransferReadOp op) {
// information to ensure correctness of downstream assumptions. It is possible
// to enable this if caller can assert that tensor will be lowered in a
// particular manner.
- auto sourceType = dyn_cast<MemRefType>(op.getSource().getType());
+ auto sourceType = dyn_cast<MemRefType>(op.getBase().getType());
if (!sourceType)
return false;
@@ -309,7 +309,7 @@ bool nvgpu::canLowerToWarpMatrixOperation(vector::TransferWriteOp op) {
return false;
// Currently we can't support reads on tensor types because we need stride
// information to ensure correctness of downstream assumptions.
- auto sourceType = dyn_cast<MemRefType>(op.getSource().getType());
+ auto sourceType = dyn_cast<MemRefType>(op.getBase().getType());
if (!sourceType)
return false;
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index eb6e710..2bf7aaa 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -662,8 +662,9 @@ static ParseResult parseClauseWithRegionArgs(
parser.parseInteger(mapIndicesVec.emplace_back()) ||
parser.parseRSquare())
return failure();
- } else
+ } else {
mapIndicesVec.push_back(-1);
+ }
}
return success();
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index 551411b..57c2723 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -594,9 +594,10 @@ transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
} else if (isForallWithIdenticalConfiguration(target, source)) {
fusedLoop = fuseIndependentSiblingForallLoops(
cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
- } else
+ } else {
return emitSilenceableFailure(target->getLoc())
<< "operations cannot be fused";
+ }
assert(fusedLoop && "failed to fuse operations");
diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
index 3bff148..4aacbe7 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
@@ -417,8 +417,9 @@ scf::ForOp LoopPipelinerInternal::createKernelLoop(
[maxStage - defStage->second];
assert(valueVersion);
newLoopArg.push_back(valueVersion);
- } else
+ } else {
newLoopArg.push_back(forOp.getInitArgs()[retVal.index()]);
+ }
}
for (auto escape : crossStageValues) {
LiverangeInfo &info = escape.second;
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
index e172f02..ad12673 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopFusion.cpp
@@ -124,8 +124,9 @@ static bool haveNoReadsAfterWriteExceptSameIndex(
OperationEquivalence::Flags::IgnoreLocations)) {
return WalkResult::interrupt();
}
- } else
+ } else {
return WalkResult::interrupt();
+ }
}
}
return WalkResult::advance();
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index 998b0fb..a19bfb2 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -36,7 +36,7 @@ namespace tensor {
using namespace mlir;
static Value getTensorOperand(vector::TransferReadOp op) {
- return op.getSource();
+ return op.getBase();
}
static Value getTensorOperand(tensor::InsertSliceOp op) {
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index f9c7fb7..f6c3c6a 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -314,7 +314,7 @@ bool mlir::vector::isDisjointTransferIndices(
bool mlir::vector::isDisjointTransferSet(VectorTransferOpInterface transferA,
VectorTransferOpInterface transferB,
bool testDynamicValueUsingBounds) {
- if (transferA.getSource() != transferB.getSource())
+ if (transferA.getBase() != transferB.getBase())
return false;
return isDisjointTransferIndices(transferA, transferB,
testDynamicValueUsingBounds);
@@ -4205,7 +4205,7 @@ static void printTransferAttrs(OpAsmPrinter &p, VectorTransferOpInterface op) {
}
void TransferReadOp::print(OpAsmPrinter &p) {
- p << " " << getSource() << "[" << getIndices() << "], " << getPadding();
+ p << " " << getBase() << "[" << getIndices() << "], " << getPadding();
if (getMask())
p << ", " << getMask();
printTransferAttrs(p, *this);
@@ -4464,7 +4464,7 @@ static LogicalResult foldTransferFullMask(TransferOp op) {
static Value foldRAW(TransferReadOp readOp) {
if (!llvm::isa<RankedTensorType>(readOp.getShapedType()))
return {};
- auto defWrite = readOp.getSource().getDefiningOp<vector::TransferWriteOp>();
+ auto defWrite = readOp.getBase().getDefiningOp<vector::TransferWriteOp>();
while (defWrite) {
if (checkSameValueRAW(defWrite, readOp))
return defWrite.getVector();
@@ -4472,7 +4472,7 @@ static Value foldRAW(TransferReadOp readOp) {
cast<VectorTransferOpInterface>(defWrite.getOperation()),
cast<VectorTransferOpInterface>(readOp.getOperation())))
break;
- defWrite = defWrite.getSource().getDefiningOp<vector::TransferWriteOp>();
+ defWrite = defWrite.getBase().getDefiningOp<vector::TransferWriteOp>();
}
return {};
}
@@ -4500,7 +4500,7 @@ void TransferReadOp::getEffects(
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) {
if (llvm::isa<MemRefType>(getShapedType()))
- effects.emplace_back(MemoryEffects::Read::get(), &getSourceMutable(),
+ effects.emplace_back(MemoryEffects::Read::get(), &getBaseMutable(),
SideEffects::DefaultResource::get());
}
@@ -4542,7 +4542,7 @@ struct TransferReadAfterWriteToBroadcast
if (readOp.hasOutOfBoundsDim() ||
!llvm::isa<RankedTensorType>(readOp.getShapedType()))
return failure();
- auto defWrite = readOp.getSource().getDefiningOp<vector::TransferWriteOp>();
+ auto defWrite = readOp.getBase().getDefiningOp<vector::TransferWriteOp>();
if (!defWrite)
return failure();
// TODO: If the written transfer chunk is a superset of the read transfer
@@ -4727,7 +4727,7 @@ ParseResult TransferWriteOp::parse(OpAsmParser &parser,
}
void TransferWriteOp::print(OpAsmPrinter &p) {
- p << " " << getVector() << ", " << getSource() << "[" << getIndices() << "]";
+ p << " " << getVector() << ", " << getBase() << "[" << getIndices() << "]";
if (getMask())
p << ", " << getMask();
printTransferAttrs(p, *this);
@@ -4806,7 +4806,7 @@ static LogicalResult foldReadInitWrite(TransferWriteOp write,
if (write.getTransferRank() == 0)
return failure();
auto rankedTensorType =
- llvm::dyn_cast<RankedTensorType>(write.getSource().getType());
+ llvm::dyn_cast<RankedTensorType>(write.getBase().getType());
// If not operating on tensors, bail.
if (!rankedTensorType)
return failure();
@@ -4828,7 +4828,7 @@ static LogicalResult foldReadInitWrite(TransferWriteOp write,
if (read.hasOutOfBoundsDim() || write.hasOutOfBoundsDim())
return failure();
// Tensor types must be the same.
- if (read.getSource().getType() != rankedTensorType)
+ if (read.getBase().getType() != rankedTensorType)
return failure();
// Vector types must be the same.
if (read.getVectorType() != write.getVectorType())
@@ -4845,13 +4845,13 @@ static LogicalResult foldReadInitWrite(TransferWriteOp write,
llvm::any_of(write.getIndices(), isNotConstantZero))
return failure();
// Success.
- results.push_back(read.getSource());
+ results.push_back(read.getBase());
return success();
}
static bool checkSameValueWAR(vector::TransferReadOp read,
vector::TransferWriteOp write) {
- return read.getSource() == write.getSource() &&
+ return read.getBase() == write.getBase() &&
read.getIndices() == write.getIndices() &&
read.getPermutationMap() == write.getPermutationMap() &&
read.getVectorType() == write.getVectorType() && !read.getMask() &&
@@ -4873,7 +4873,7 @@ static bool checkSameValueWAR(vector::TransferReadOp read,
/// ```
static LogicalResult foldWAR(TransferWriteOp write,
SmallVectorImpl<OpFoldResult> &results) {
- if (!llvm::isa<RankedTensorType>(write.getSource().getType()))
+ if (!llvm::isa<RankedTensorType>(write.getBase().getType()))
return failure();
auto read = write.getVector().getDefiningOp<vector::TransferReadOp>();
if (!read)
@@ -4881,7 +4881,7 @@ static LogicalResult foldWAR(TransferWriteOp write,
if (!checkSameValueWAR(read, write))
return failure();
- results.push_back(read.getSource());
+ results.push_back(read.getBase());
return success();
}
@@ -4953,12 +4953,11 @@ public:
return failure();
vector::TransferWriteOp writeToModify = writeOp;
- auto defWrite =
- writeOp.getSource().getDefiningOp<vector::TransferWriteOp>();
+ auto defWrite = writeOp.getBase().getDefiningOp<vector::TransferWriteOp>();
while (defWrite) {
if (checkSameValueWAW(writeOp, defWrite)) {
rewriter.modifyOpInPlace(writeToModify, [&]() {
- writeToModify.getSourceMutable().assign(defWrite.getSource());
+ writeToModify.getBaseMutable().assign(defWrite.getBase());
});
return success();
}
@@ -4971,7 +4970,7 @@ public:
if (!defWrite->hasOneUse())
break;
writeToModify = defWrite;
- defWrite = defWrite.getSource().getDefiningOp<vector::TransferWriteOp>();
+ defWrite = defWrite.getBase().getDefiningOp<vector::TransferWriteOp>();
}
return failure();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp
index 1caec5b..b2272c5 100644
--- a/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -52,7 +52,7 @@ struct TransferReadOpInterface
auto readOp = cast<vector::TransferReadOp>(op);
assert(isa<TensorType>(readOp.getShapedType()) &&
"only tensor types expected");
- FailureOr<Value> buffer = getBuffer(rewriter, readOp.getSource(), options);
+ FailureOr<Value> buffer = getBuffer(rewriter, readOp.getBase(), options);
if (failed(buffer))
return failure();
replaceOpWithNewBufferizedOp<vector::TransferReadOp>(
@@ -110,7 +110,7 @@ struct TransferWriteOpInterface
// Create a new transfer_write on buffer that doesn't have a return value.
FailureOr<Value> resultBuffer =
- getBuffer(rewriter, writeOp.getSource(), options);
+ getBuffer(rewriter, writeOp.getBase(), options);
if (failed(resultBuffer))
return failure();
rewriter.create<vector::TransferWriteOp>(
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp
index 1f6cac2..ba21092 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp
@@ -222,7 +222,7 @@ public:
// Replace the `vector.mask` operation.
rewriter.replaceOpWithNewOp<TransferReadOp>(
- maskingOp.getOperation(), readOp.getVectorType(), readOp.getSource(),
+ maskingOp.getOperation(), readOp.getVectorType(), readOp.getBase(),
readOp.getIndices(), readOp.getPermutationMap(), readOp.getPadding(),
maskingOp.getMask(), readOp.getInBounds());
return success();
@@ -245,7 +245,7 @@ public:
// Replace the `vector.mask` operation.
rewriter.replaceOpWithNewOp<TransferWriteOp>(
maskingOp.getOperation(), resultType, writeOp.getVector(),
- writeOp.getSource(), writeOp.getIndices(), writeOp.getPermutationMap(),
+ writeOp.getBase(), writeOp.getIndices(), writeOp.getPermutationMap(),
maskingOp.getMask(), writeOp.getInBounds());
return success();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index c5d29c0..5b81d0d 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -139,7 +139,7 @@ struct TransferReadPermutationLowering
VectorType newReadType = VectorType::get(
newVectorShape, op.getVectorType().getElementType(), newScalableDims);
Value newRead = rewriter.create<vector::TransferReadOp>(
- op.getLoc(), newReadType, op.getSource(), op.getIndices(),
+ op.getLoc(), newReadType, op.getBase(), op.getIndices(),
AffineMapAttr::get(newMap), op.getPadding(), op.getMask(),
newInBoundsAttr);
@@ -214,7 +214,7 @@ struct TransferWritePermutationLowering
auto newMap = AffineMap::getMinorIdentityMap(
map.getNumDims(), map.getNumResults(), rewriter.getContext());
auto newWrite = rewriter.create<vector::TransferWriteOp>(
- op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ op.getLoc(), newVec, op.getBase(), op.getIndices(),
AffineMapAttr::get(newMap), op.getMask(), newInBoundsAttr);
if (newWrite.hasPureTensorSemantics())
return newWrite.getResult();
@@ -300,7 +300,7 @@ struct TransferWriteNonPermutationLowering
}
ArrayAttr newInBoundsAttr = rewriter.getBoolArrayAttr(newInBoundsValues);
auto newWrite = rewriter.create<vector::TransferWriteOp>(
- op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ op.getLoc(), newVec, op.getBase(), op.getIndices(),
AffineMapAttr::get(newMap), newMask, newInBoundsAttr);
if (newWrite.hasPureTensorSemantics())
return newWrite.getResult();
@@ -371,7 +371,7 @@ struct TransferOpReduceRank
op.getInBoundsAttr().getValue().take_back(reducedShapeRank))
: ArrayAttr();
Value newRead = rewriter.create<vector::TransferReadOp>(
- op.getLoc(), newReadType, op.getSource(), op.getIndices(),
+ op.getLoc(), newReadType, op.getBase(), op.getIndices(),
AffineMapAttr::get(newMap), op.getPadding(), op.getMask(),
newInBoundsAttr);
return rewriter
@@ -474,12 +474,12 @@ struct TransferReadToVectorLoadLowering
Value fill = rewriter.create<vector::SplatOp>(
read.getLoc(), unbroadcastedVectorType, read.getPadding());
res = rewriter.create<vector::MaskedLoadOp>(
- read.getLoc(), unbroadcastedVectorType, read.getSource(),
+ read.getLoc(), unbroadcastedVectorType, read.getBase(),
read.getIndices(), read.getMask(), fill);
} else {
- res = rewriter.create<vector::LoadOp>(
- read.getLoc(), unbroadcastedVectorType, read.getSource(),
- read.getIndices());
+ res = rewriter.create<vector::LoadOp>(read.getLoc(),
+ unbroadcastedVectorType,
+ read.getBase(), read.getIndices());
}
// Insert a broadcasting op if required.
@@ -570,11 +570,11 @@ struct TransferWriteToVectorStoreLowering
});
rewriter.create<vector::MaskedStoreOp>(
- write.getLoc(), write.getSource(), write.getIndices(),
- write.getMask(), write.getVector());
+ write.getLoc(), write.getBase(), write.getIndices(), write.getMask(),
+ write.getVector());
} else {
rewriter.create<vector::StoreOp>(write.getLoc(), write.getVector(),
- write.getSource(), write.getIndices());
+ write.getBase(), write.getIndices());
}
// There's no return value for StoreOps. Use Value() to signal success to
// matchAndRewrite.
diff --git a/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp b/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp
index e8e178f..392bbb1 100644
--- a/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp
@@ -37,7 +37,7 @@ struct TransferReadOpSubsetExtractionOpInterface
: public SubsetExtractionOpInterface::ExternalModel<
TransferReadOpSubsetExtractionOpInterface, vector::TransferReadOp> {
OpOperand &getSourceOperand(Operation *op) const {
- return cast<vector::TransferReadOp>(op).getSourceMutable();
+ return cast<vector::TransferReadOp>(op).getBaseMutable();
}
};
@@ -49,7 +49,7 @@ struct TransferWriteOpSubsetInsertionOpInterface
}
OpOperand &getDestinationOperand(Operation *op) const {
- return cast<vector::TransferWriteOp>(op).getSourceMutable();
+ return cast<vector::TransferWriteOp>(op).getBaseMutable();
}
Value buildSubsetExtraction(Operation *op, OpBuilder &builder,
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
index f8a9b39..045c192 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -718,7 +718,7 @@ struct WarpOpTransferRead : public WarpDistributionPattern {
auto read = operand->get().getDefiningOp<vector::TransferReadOp>();
// Source must be defined outside of the region.
- if (!warpOp.isDefinedOutsideOfRegion(read.getSource()))
+ if (!warpOp.isDefinedOutsideOfRegion(read.getBase()))
return rewriter.notifyMatchFailure(
read, "source must be defined outside of the region");
@@ -802,7 +802,7 @@ struct WarpOpTransferRead : public WarpDistributionPattern {
hasMask ? newWarpOp.getResult(newRetIndices[newRetIndices.size() - 1])
: Value();
auto newRead = rewriter.create<vector::TransferReadOp>(
- read.getLoc(), distributedVal.getType(), read.getSource(), newIndices,
+ read.getLoc(), distributedVal.getType(), read.getBase(), newIndices,
read.getPermutationMapAttr(), newPadding, newMask,
read.getInBoundsAttr());
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index 68a44ea..067d4e3 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -230,7 +230,7 @@ struct CastAwayTransferReadLeadingOneDim
if (read.getTransferRank() == 0)
return failure();
- auto shapedType = cast<ShapedType>(read.getSource().getType());
+ auto shapedType = cast<ShapedType>(read.getBase().getType());
if (shapedType.getElementType() != read.getVectorType().getElementType())
return failure();
@@ -260,7 +260,7 @@ struct CastAwayTransferReadLeadingOneDim
}
auto newRead = rewriter.create<vector::TransferReadOp>(
- read.getLoc(), newType, read.getSource(), read.getIndices(),
+ read.getLoc(), newType, read.getBase(), read.getIndices(),
AffineMapAttr::get(newMap), read.getPadding(), mask, inBoundsAttr);
rewriter.replaceOpWithNewOp<vector::BroadcastOp>(read, oldType, newRead);
@@ -284,7 +284,7 @@ struct CastAwayTransferWriteLeadingOneDim
if (write.getTransferRank() == 0)
return failure();
- auto shapedType = dyn_cast<ShapedType>(write.getSource().getType());
+ auto shapedType = dyn_cast<ShapedType>(write.getBase().getType());
if (shapedType.getElementType() != write.getVectorType().getElementType())
return failure();
@@ -314,13 +314,13 @@ struct CastAwayTransferWriteLeadingOneDim
Value newMask = dropUnitDimsFromMask(
rewriter, write.getLoc(), write.getMask(), newType, newMap, maskType);
rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- write, newVector, write.getSource(), write.getIndices(),
+ write, newVector, write.getBase(), write.getIndices(),
AffineMapAttr::get(newMap), newMask, inBoundsAttr);
return success();
}
rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- write, newVector, write.getSource(), write.getIndices(),
+ write, newVector, write.getBase(), write.getIndices(),
AffineMapAttr::get(newMap), inBoundsAttr);
return success();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index a560aa1..004bead 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -1249,7 +1249,7 @@ struct ConvertVectorTransferRead final
auto loc = op.getLoc();
auto containerElemTy =
- cast<MemRefType>(adaptor.getSource().getType()).getElementType();
+ cast<MemRefType>(adaptor.getBase().getType()).getElementType();
Type emulatedElemTy = op.getType().getElementType();
int emulatedBits = emulatedElemTy.getIntOrFloatBitWidth();
int containerBits = containerElemTy.getIntOrFloatBitWidth();
@@ -1272,7 +1272,7 @@ struct ConvertVectorTransferRead final
adaptor.getPadding());
auto stridedMetadata =
- rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getSource());
+ rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
memref::LinearizedMemRefInfo linearizedInfo;
@@ -1294,7 +1294,7 @@ struct ConvertVectorTransferRead final
emulatedPerContainerElem);
auto newRead = rewriter.create<vector::TransferReadOp>(
- loc, VectorType::get(numElements, containerElemTy), adaptor.getSource(),
+ loc, VectorType::get(numElements, containerElemTy), adaptor.getBase(),
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices),
newPadding);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
index 999fb9c..d4d07c7 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -92,7 +92,7 @@ void TransferOptimization::deadStoreOp(vector::TransferWriteOp write) {
<< "\n");
llvm::SmallVector<Operation *, 8> blockingAccesses;
Operation *firstOverwriteCandidate = nullptr;
- Value source = memref::skipViewLikeOps(cast<MemrefValue>(write.getSource()));
+ Value source = memref::skipViewLikeOps(cast<MemrefValue>(write.getBase()));
llvm::SmallVector<Operation *, 32> users(source.getUsers().begin(),
source.getUsers().end());
llvm::SmallDenseSet<Operation *, 32> processed;
@@ -112,8 +112,8 @@ void TransferOptimization::deadStoreOp(vector::TransferWriteOp write) {
if (auto nextWrite = dyn_cast<vector::TransferWriteOp>(user)) {
// Check candidate that can override the store.
if (memref::isSameViewOrTrivialAlias(
- cast<MemrefValue>(nextWrite.getSource()),
- cast<MemrefValue>(write.getSource())) &&
+ cast<MemrefValue>(nextWrite.getBase()),
+ cast<MemrefValue>(write.getBase())) &&
checkSameValueWAW(nextWrite, write) &&
postDominators.postDominates(nextWrite, write)) {
if (firstOverwriteCandidate == nullptr ||
@@ -178,7 +178,7 @@ void TransferOptimization::storeToLoadForwarding(vector::TransferReadOp read) {
<< "\n");
SmallVector<Operation *, 8> blockingWrites;
vector::TransferWriteOp lastwrite = nullptr;
- Value source = memref::skipViewLikeOps(cast<MemrefValue>(read.getSource()));
+ Value source = memref::skipViewLikeOps(cast<MemrefValue>(read.getBase()));
llvm::SmallVector<Operation *, 32> users(source.getUsers().begin(),
source.getUsers().end());
llvm::SmallDenseSet<Operation *, 32> processed;
@@ -202,8 +202,8 @@ void TransferOptimization::storeToLoadForwarding(vector::TransferReadOp read) {
/*testDynamicValueUsingBounds=*/true))
continue;
if (memref::isSameViewOrTrivialAlias(
- cast<MemrefValue>(read.getSource()),
- cast<MemrefValue>(write.getSource())) &&
+ cast<MemrefValue>(read.getBase()),
+ cast<MemrefValue>(write.getBase())) &&
dominators.dominates(write, read) && checkSameValueRAW(write, read)) {
if (lastwrite == nullptr || dominators.dominates(lastwrite, write))
lastwrite = write;
@@ -351,7 +351,7 @@ class TransferReadDropUnitDimsPattern
auto loc = transferReadOp.getLoc();
Value vector = transferReadOp.getVector();
VectorType vectorType = cast<VectorType>(vector.getType());
- Value source = transferReadOp.getSource();
+ Value source = transferReadOp.getBase();
MemRefType sourceType = dyn_cast<MemRefType>(source.getType());
// TODO: support tensor types.
if (!sourceType)
@@ -433,7 +433,7 @@ class TransferWriteDropUnitDimsPattern
auto loc = transferWriteOp.getLoc();
Value vector = transferWriteOp.getVector();
VectorType vectorType = cast<VectorType>(vector.getType());
- Value source = transferWriteOp.getSource();
+ Value source = transferWriteOp.getBase();
MemRefType sourceType = dyn_cast<MemRefType>(source.getType());
// TODO: support tensor type.
if (!sourceType)
@@ -604,7 +604,7 @@ public:
auto loc = transferReadOp.getLoc();
Value vector = transferReadOp.getVector();
VectorType vectorType = cast<VectorType>(vector.getType());
- auto source = transferReadOp.getSource();
+ auto source = transferReadOp.getBase();
MemRefType sourceType = dyn_cast<MemRefType>(source.getType());
// 0. Check pre-conditions
@@ -695,7 +695,7 @@ public:
auto loc = transferWriteOp.getLoc();
Value vector = transferWriteOp.getVector();
VectorType vectorType = cast<VectorType>(vector.getType());
- Value source = transferWriteOp.getSource();
+ Value source = transferWriteOp.getBase();
MemRefType sourceType = dyn_cast<MemRefType>(source.getType());
// 0. Check pre-conditions
@@ -851,12 +851,12 @@ class RewriteScalarExtractElementOfTransferRead
*getConstantIntValue(ofr));
}
}
- if (isa<MemRefType>(xferOp.getSource().getType())) {
- rewriter.replaceOpWithNewOp<memref::LoadOp>(extractOp, xferOp.getSource(),
+ if (isa<MemRefType>(xferOp.getBase().getType())) {
+ rewriter.replaceOpWithNewOp<memref::LoadOp>(extractOp, xferOp.getBase(),
newIndices);
} else {
rewriter.replaceOpWithNewOp<tensor::ExtractOp>(
- extractOp, xferOp.getSource(), newIndices);
+ extractOp, xferOp.getBase(), newIndices);
}
return success();
@@ -899,12 +899,12 @@ class RewriteScalarExtractOfTransferRead
extractOp.getLoc(), *getConstantIntValue(ofr));
}
}
- if (isa<MemRefType>(xferOp.getSource().getType())) {
- rewriter.replaceOpWithNewOp<memref::LoadOp>(extractOp, xferOp.getSource(),
+ if (isa<MemRefType>(xferOp.getBase().getType())) {
+ rewriter.replaceOpWithNewOp<memref::LoadOp>(extractOp, xferOp.getBase(),
newIndices);
} else {
rewriter.replaceOpWithNewOp<tensor::ExtractOp>(
- extractOp, xferOp.getSource(), newIndices);
+ extractOp, xferOp.getBase(), newIndices);
}
return success();
@@ -932,12 +932,12 @@ class RewriteScalarWrite : public OpRewritePattern<vector::TransferWriteOp> {
Value scalar =
rewriter.create<vector::ExtractOp>(xferOp.getLoc(), xferOp.getVector());
// Construct a scalar store.
- if (isa<MemRefType>(xferOp.getSource().getType())) {
+ if (isa<MemRefType>(xferOp.getBase().getType())) {
rewriter.replaceOpWithNewOp<memref::StoreOp>(
- xferOp, scalar, xferOp.getSource(), xferOp.getIndices());
+ xferOp, scalar, xferOp.getBase(), xferOp.getIndices());
} else {
rewriter.replaceOpWithNewOp<tensor::InsertOp>(
- xferOp, scalar, xferOp.getSource(), xferOp.getIndices());
+ xferOp, scalar, xferOp.getBase(), xferOp.getIndices());
}
return success();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
index b801692..256c8cb 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
@@ -58,7 +58,7 @@ static Value createInBoundsCond(RewriterBase &b,
b, loc, b.getAffineDimExpr(0) + b.getAffineConstantExpr(vectorSize),
{xferOp.getIndices()[indicesIdx]});
OpFoldResult dimSz =
- memref::getMixedSize(b, loc, xferOp.getSource(), indicesIdx);
+ memref::getMixedSize(b, loc, xferOp.getBase(), indicesIdx);
auto maybeCstSum = getConstantIntValue(sum);
auto maybeCstDimSz = getConstantIntValue(dimSz);
if (maybeCstSum && maybeCstDimSz && *maybeCstSum <= *maybeCstDimSz)
@@ -185,7 +185,7 @@ static Value castToCompatibleMemRefType(OpBuilder &b, Value memref,
}
/// Operates under a scoped context to build the intersection between the
-/// view `xferOp.getSource()` @ `xferOp.getIndices()` and the view `alloc`.
+/// view `xferOp.getBase()` @ `xferOp.getIndices()` and the view `alloc`.
// TODO: view intersection/union/differences should be a proper std op.
static std::pair<Value, Value>
createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
@@ -202,8 +202,8 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
auto isaWrite = isa<vector::TransferWriteOp>(xferOp);
xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) {
using MapList = ArrayRef<ArrayRef<AffineExpr>>;
- Value dimMemRef = b.create<memref::DimOp>(xferOp.getLoc(),
- xferOp.getSource(), indicesIdx);
+ Value dimMemRef =
+ b.create<memref::DimOp>(xferOp.getLoc(), xferOp.getBase(), indicesIdx);
Value dimAlloc = b.create<memref::DimOp>(loc, alloc, resultIdx);
Value index = xferOp.getIndices()[indicesIdx];
AffineExpr i, j, k;
@@ -221,9 +221,9 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
SmallVector<OpFoldResult> destIndices(memrefRank, b.getIndexAttr(0));
SmallVector<OpFoldResult> strides(memrefRank, b.getIndexAttr(1));
auto copySrc = b.create<memref::SubViewOp>(
- loc, isaWrite ? alloc : xferOp.getSource(), srcIndices, sizes, strides);
+ loc, isaWrite ? alloc : xferOp.getBase(), srcIndices, sizes, strides);
auto copyDest = b.create<memref::SubViewOp>(
- loc, isaWrite ? xferOp.getSource() : alloc, destIndices, sizes, strides);
+ loc, isaWrite ? xferOp.getBase() : alloc, destIndices, sizes, strides);
return std::make_pair(copySrc, copyDest);
}
@@ -252,7 +252,7 @@ createFullPartialLinalgCopy(RewriterBase &b, vector::TransferReadOp xferOp,
MemRefType compatibleMemRefType, Value alloc) {
Location loc = xferOp.getLoc();
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
- Value memref = xferOp.getSource();
+ Value memref = xferOp.getBase();
return b.create<scf::IfOp>(
loc, inBoundsCond,
[&](OpBuilder &b, Location loc) {
@@ -305,7 +305,7 @@ static scf::IfOp createFullPartialVectorTransferRead(
Location loc = xferOp.getLoc();
scf::IfOp fullPartialIfOp;
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
- Value memref = xferOp.getSource();
+ Value memref = xferOp.getBase();
return b.create<scf::IfOp>(
loc, inBoundsCond,
[&](OpBuilder &b, Location loc) {
@@ -352,7 +352,7 @@ getLocationToWriteFullVec(RewriterBase &b, vector::TransferWriteOp xferOp,
MemRefType compatibleMemRefType, Value alloc) {
Location loc = xferOp.getLoc();
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
- Value memref = xferOp.getSource();
+ Value memref = xferOp.getBase();
return b
.create<scf::IfOp>(
loc, inBoundsCond,
@@ -509,7 +509,7 @@ static Operation *getAutomaticAllocationScope(Operation *op) {
///
/// Preconditions:
/// 1. `xferOp.getPermutationMap()` must be a minor identity map
-/// 2. the rank of the `xferOp.getSource()` and the rank of the
+/// 2. the rank of the `xferOp.getBase()` and the rank of the
/// `xferOp.getVector()` must be equal. This will be relaxed in the future
/// but requires rank-reducing subviews.
LogicalResult mlir::vector::splitFullAndPartialTransfer(
@@ -611,7 +611,7 @@ LogicalResult mlir::vector::splitFullAndPartialTransfer(
// The operation is cloned to prevent deleting information needed for the
// later IR creation.
IRMapping mapping;
- mapping.map(xferWriteOp.getSource(), memrefAndIndices.front());
+ mapping.map(xferWriteOp.getBase(), memrefAndIndices.front());
mapping.map(xferWriteOp.getIndices(), memrefAndIndices.drop_front());
auto *clone = b.clone(*xferWriteOp, mapping);
clone->setAttr(xferWriteOp.getInBoundsAttrName(), inBoundsAttr);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index b94c5fc..c635be6 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1265,7 +1265,7 @@ public:
unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1;
Value off = xferOp.getIndices()[lastIndex];
Value dim =
- vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex);
+ vector::createOrFoldDimOp(rewriter, loc, xferOp.getBase(), lastIndex);
Value b = rewriter.create<arith::SubIOp>(loc, dim.getType(), dim, off);
Value mask = rewriter.create<vector::CreateMaskOp>(
loc,
@@ -1437,7 +1437,7 @@ class DropInnerMostUnitDimsTransferRead
if (readOp.getMask())
return failure();
- auto srcType = dyn_cast<MemRefType>(readOp.getSource().getType());
+ auto srcType = dyn_cast<MemRefType>(readOp.getBase().getType());
if (!srcType)
return failure();
@@ -1469,7 +1469,7 @@ class DropInnerMostUnitDimsTransferRead
auto loc = readOp.getLoc();
SmallVector<OpFoldResult> sizes =
- memref::getMixedSizes(rewriter, loc, readOp.getSource());
+ memref::getMixedSizes(rewriter, loc, readOp.getBase());
SmallVector<OpFoldResult> offsets(srcType.getRank(),
rewriter.getIndexAttr(0));
SmallVector<OpFoldResult> strides(srcType.getRank(),
@@ -1480,7 +1480,7 @@ class DropInnerMostUnitDimsTransferRead
ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));
Value rankedReducedView = rewriter.create<memref::SubViewOp>(
- loc, resultMemrefType, readOp.getSource(), offsets, sizes, strides);
+ loc, resultMemrefType, readOp.getBase(), offsets, sizes, strides);
auto permMap = getTransferMinorIdentityMap(
cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
Value result = rewriter.create<vector::TransferReadOp>(
@@ -1527,7 +1527,7 @@ class DropInnerMostUnitDimsTransferWrite
if (writeOp.getMask())
return failure();
- auto srcType = dyn_cast<MemRefType>(writeOp.getSource().getType());
+ auto srcType = dyn_cast<MemRefType>(writeOp.getBase().getType());
if (!srcType)
return failure();
@@ -1559,7 +1559,7 @@ class DropInnerMostUnitDimsTransferWrite
Location loc = writeOp.getLoc();
SmallVector<OpFoldResult> sizes =
- memref::getMixedSizes(rewriter, loc, writeOp.getSource());
+ memref::getMixedSizes(rewriter, loc, writeOp.getBase());
SmallVector<OpFoldResult> offsets(srcType.getRank(),
rewriter.getIndexAttr(0));
SmallVector<OpFoldResult> strides(srcType.getRank(),
@@ -1571,7 +1571,7 @@ class DropInnerMostUnitDimsTransferWrite
writeOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));
Value rankedReducedView = rewriter.create<memref::SubViewOp>(
- loc, resultMemrefType, writeOp.getSource(), offsets, sizes, strides);
+ loc, resultMemrefType, writeOp.getBase(), offsets, sizes, strides);
auto permMap = getTransferMinorIdentityMap(
cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
index dffb13c..1cc477d 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
@@ -164,7 +164,7 @@ struct UnrollTransferReadPattern
sliceTransferIndices(elementOffsets, originalIndices,
readOp.getPermutationMap(), loc, rewriter);
auto slicedRead = rewriter.create<vector::TransferReadOp>(
- loc, targetType, readOp.getSource(), indices,
+ loc, targetType, readOp.getBase(), indices,
readOp.getPermutationMapAttr(), readOp.getPadding(), readOp.getMask(),
readOp.getInBoundsAttr());
@@ -215,7 +215,7 @@ struct UnrollTransferWritePattern
sliceTransferIndices(elementOffsets, originalIndices,
writeOp.getPermutationMap(), loc, rewriter);
Operation *slicedWrite = rewriter.create<vector::TransferWriteOp>(
- loc, slicedVector, resultTensor ? resultTensor : writeOp.getSource(),
+ loc, slicedVector, resultTensor ? resultTensor : writeOp.getBase(),
indices, writeOp.getPermutationMapAttr(), writeOp.getInBoundsAttr());
// For the tensor case update the destination for the next transfer write.
if (!slicedWrite->getResults().empty())
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 16a2732..399e902 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -312,7 +312,7 @@ SmallVector<OpFoldResult> vector::getMixedSizesXfer(bool hasTensorSemantics,
Value base = TypeSwitch<Operation *, Value>(xfer)
.Case<vector::TransferReadOp>(
- [&](auto readOp) { return readOp.getSource(); })
+ [&](auto readOp) { return readOp.getBase(); })
.Case<vector::TransferWriteOp>(
[&](auto writeOp) { return writeOp.getOperand(1); });
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
index f2cfa50..c99e925 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Arith/Utils/Utils.h"
+#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/IR/Builders.h"
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/CMakeLists.txt b/mlir/lib/Dialect/XeGPU/Transforms/CMakeLists.txt
index 901e02d..892eb79 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/XeGPU/Transforms/CMakeLists.txt
@@ -1,6 +1,7 @@
add_mlir_dialect_library(MLIRXeGPUTransforms
XeGPUFoldAliasOps.cpp
XeGPUSubgroupDistribute.cpp
+ XeGPUUnroll.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/XeGPU
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
index 2300d9e..acdbe7b 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
@@ -157,8 +157,9 @@ void LayoutInfo::print(raw_ostream &os) const {
laneLayout.print(os);
os << ", lane_data: ";
laneData.print(os);
- } else
+ } else {
os << "Not assigned.";
+ }
}
LayoutInfo LayoutInfo::meet(const LayoutInfo &lhs, const LayoutInfo &rhs) {
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
new file mode 100644
index 0000000..44d45dd
--- /dev/null
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
@@ -0,0 +1,427 @@
+//===- XeGPUUnroll.cpp - patterns to do unrolling ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains patterns for unrolling XeGPU operations. It follows a
+// similar concept and design as vector unroll patterns, serving as a complement
+// to them.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/XeGPU/Transforms/Passes.h"
+
+#include "mlir/Dialect/Utils/IndexingUtils.h"
+#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
+#include "mlir/Dialect/XeGPU/Transforms/Transforms.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include <numeric>
+
+namespace mlir {
+namespace xegpu {
+#define GEN_PASS_DEF_XEGPUUNROLL
+#include "mlir/Dialect/XeGPU/Transforms/Passes.h.inc"
+} // namespace xegpu
+} // namespace mlir
+
+#define DEBUG_TYPE "xegpu-unroll"
+#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")
+#define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n")
+
+using namespace mlir;
+
+namespace {
+
+template <typename SourceOp>
+struct UnrollPattern : public OpRewritePattern<SourceOp> {
+ UnrollPattern(MLIRContext *context, const xegpu::UnrollOptions &options,
+ PatternBenefit benefit = 1)
+ : OpRewritePattern<SourceOp>(context, benefit), options(options) {}
+
+protected:
+ /// Return the target shape for the given `op`. Return std::nullopt if the
+ /// op shouldn't be or cannot be unrolled.
+ std::optional<SmallVector<int64_t>> getTargetShape(Operation *op) const {
+ LDBG("");
+ LDBG("Get unroll shape for: " << *op);
+
+ if (options.filterConstraint && failed(options.filterConstraint(op))) {
+ LDBG("--no filter constraint -> BAIL");
+ return std::nullopt;
+ }
+
+ assert(options.nativeShape &&
+ "expects the native shape for native shape call back function.");
+ auto nativeShape = options.nativeShape(op);
+ return nativeShape;
+ }
+
+ SmallVector<Type> getUnrolledTypes(ShapedType type,
+ ArrayRef<int64_t> tileShape) const {
+ return options.getUnrolledTypes(type, tileShape);
+ }
+
+ /// Emulate the unpack behavior using insert_strided_slice for VectorType
+ /// values and unrealized_conversion_cast for TensorDescType values.
+ Value unpack(ValueRange srcs, Type destTy, ArrayRef<int64_t> blockSize,
+ Location loc, PatternRewriter &rewriter) const {
+ if (auto vecTy = dyn_cast<VectorType>(destTy)) {
+ assert(vecTy.getRank() == static_cast<int64_t>(blockSize.size()) &&
+ "Expecting blockSize size to match the rank of destTy.");
+ auto shape = vecTy.getShape();
+ auto zeroAttr = rewriter.getZeroAttr(vecTy.getElementType());
+
+ Value result = rewriter.create<arith::ConstantOp>(
+ loc, vecTy, DenseElementsAttr::get(vecTy, zeroAttr));
+ for (auto [src, offsets] :
+ llvm::zip_equal(srcs, StaticTileOffsetRange(shape, blockSize))) {
+ SmallVector<int64_t> staticStrides(offsets.size(), 1);
+ result = rewriter.create<vector::InsertStridedSliceOp>(
+ loc, src, result, offsets, staticStrides);
+ }
+ return result;
+ }
+
+ if (isa<xegpu::TensorDescType>(destTy)) {
+ auto attr = NamedAttribute(rewriter.getStringAttr(unpackAttrName),
+ rewriter.getUnitAttr());
+ auto blkAttr = NamedAttribute(rewriter.getStringAttr(blockAttrName),
+ rewriter.getDenseI64ArrayAttr(blockSize));
+ auto castOp = rewriter.create<UnrealizedConversionCastOp>(
+ loc, destTy, srcs, ArrayRef<NamedAttribute>({attr, blkAttr}));
+ return castOp.getResult(0);
+ }
+
+ llvm_unreachable("Unexpected destTy.");
+ return Value();
+ }
+
+ /// Emulate the pack behavior using extract_strided_slice for VectorType
+ /// values and unrealized_conversion_cast for TensorDescType values.
+ SmallVector<Value> pack(Value src, TypeRange destTypes,
+ ArrayRef<int64_t> blockSize, Location loc,
+ PatternRewriter &rewriter) const {
+ if (auto vecTy = dyn_cast<VectorType>(src.getType())) {
+ assert(vecTy.getRank() == static_cast<int64_t>(blockSize.size()) &&
+ "Expecting blockSize size to match the rank of src.");
+ auto shape = vecTy.getShape();
+ SmallVector<Value> results;
+ for (SmallVector<int64_t> offsets :
+ StaticTileOffsetRange(shape, blockSize)) {
+ SmallVector<int64_t> staticStrides(offsets.size(), 1);
+ auto slice = rewriter.create<vector::ExtractStridedSliceOp>(
+ loc, src, offsets, blockSize, staticStrides);
+ results.push_back(slice);
+ }
+ return results;
+ }
+
+ if (isa<xegpu::TensorDescType>(src.getType())) {
+ auto attr = NamedAttribute(rewriter.getStringAttr(packAttrName),
+ rewriter.getUnitAttr());
+ auto blkAttr = NamedAttribute(rewriter.getStringAttr(blockAttrName),
+ rewriter.getDenseI64ArrayAttr(blockSize));
+ auto castOp = rewriter.create<UnrealizedConversionCastOp>(
+ loc, destTypes, src, ArrayRef<NamedAttribute>({attr, blkAttr}));
+ return castOp.getResults();
+ }
+
+ llvm_unreachable("Unexpected src type.");
+ return SmallVector<Value>();
+ }
+
+private:
+ const char *const packAttrName = "__xegpu_blocking_pack__";
+ const char *const unpackAttrName = "__xegpu_blocking_unpack__";
+ const char *const blockAttrName = "__xegpu_blocking_tile_shape__";
+
+ xegpu::UnrollOptions options;
+};
+
+struct UnrollCreateNdOp : public UnrollPattern<xegpu::CreateNdDescOp> {
+ using UnrollPattern<xegpu::CreateNdDescOp>::UnrollPattern;
+ LogicalResult matchAndRewrite(xegpu::CreateNdDescOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+ xegpu::TensorDescType tdescTy = op.getType();
+ int64_t rank = tdescTy.getRank();
+ ArrayRef<int64_t> shape = tdescTy.getShape();
+
+ std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
+ if (!targetShape || llvm::equal(*targetShape, shape))
+ return failure();
+
+ auto newTdescTy = getUnrolledTypes(tdescTy, *targetShape)[0];
+
+ auto addi = [&](OpFoldResult a, int64_t b) -> Value {
+ std::optional<int64_t> maybeInt = getConstantIntValue(a);
+ if (maybeInt) {
+ return rewriter.create<arith::ConstantIndexOp>(loc, *maybeInt + b);
+ } else {
+ auto aV = llvm::cast<Value>(a);
+ auto bV = rewriter.create<arith::ConstantIndexOp>(loc, b);
+ return rewriter.createOrFold<arith::AddIOp>(loc, aV, bV);
+ }
+ };
+
+ SmallVector<OpFoldResult> mixedOffsets = op.getMixedOffsets();
+
+ // For n-D memrefs where n > rank, we need to handle the last `rank`
+ // dimensions only, and keep the first `n-rank` dimensions as is.
+ SmallVector<OpFoldResult> oldOffsets = llvm::to_vector(
+ llvm::drop_begin(mixedOffsets, mixedOffsets.size() - rank));
+ auto validIdxes =
+ llvm::seq<int64_t>(mixedOffsets.size() - rank, mixedOffsets.size());
+
+ SmallVector<Value> newOps;
+ for (SmallVector<int64_t> offsets :
+ StaticTileOffsetRange(shape, *targetShape)) {
+
+ for (auto [idx, oldOff, offset] :
+ llvm::zip(validIdxes, oldOffsets, offsets))
+ mixedOffsets[idx] = addi(oldOff, offset);
+
+ auto newOp = rewriter.create<xegpu::CreateNdDescOp>(
+ loc, newTdescTy, op.getSource(), mixedOffsets, op.getMixedSizes(),
+ op.getMixedStrides());
+ newOps.push_back(newOp);
+ }
+ Value castOp = unpack(newOps, tdescTy, *targetShape, loc, rewriter);
+ rewriter.replaceOp(op, castOp);
+
+ return success();
+ }
+};
+
+struct UnrollUpdateNdOffsetOp : public UnrollPattern<xegpu::UpdateNdOffsetOp> {
+ using UnrollPattern<xegpu::UpdateNdOffsetOp>::UnrollPattern;
+ LogicalResult matchAndRewrite(xegpu::UpdateNdOffsetOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+ xegpu::TensorDescType tdescTy = op.getTensorDescType();
+ ArrayRef<int64_t> shape = tdescTy.getShape();
+
+ std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
+ if (!targetShape || llvm::equal(*targetShape, shape))
+ return failure();
+
+ SmallVector<Type> convertedTdescTypes =
+ getUnrolledTypes(tdescTy, *targetShape);
+ SmallVector<Value> convertedTdesc = pack(
+ op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
+
+ SmallVector<Value> newOps;
+ for (auto t : convertedTdesc) {
+ auto newOp = rewriter.create<xegpu::UpdateNdOffsetOp>(
+ loc, t.getType(), t, op.getOffsets(), op.getConstOffsets());
+ newOps.push_back(newOp);
+ }
+ Value castOp = unpack(newOps, op.getType(), *targetShape, loc, rewriter);
+ rewriter.replaceOp(op, castOp);
+ return success();
+ }
+};
+
+struct UnrollPrefetchNdOp : public UnrollPattern<xegpu::PrefetchNdOp> {
+ using UnrollPattern<xegpu::PrefetchNdOp>::UnrollPattern;
+ LogicalResult matchAndRewrite(xegpu::PrefetchNdOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+ xegpu::TensorDescType tdescTy = op.getTensorDescType();
+ ArrayRef<int64_t> shape = tdescTy.getShape();
+
+ std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
+ if (!targetShape || llvm::equal(*targetShape, shape))
+ return failure();
+
+ SmallVector<Type> convertedTdescTypes =
+ getUnrolledTypes(tdescTy, *targetShape);
+ SmallVector<Value> convertedTdesc = pack(
+ op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
+
+ for (auto t : convertedTdesc)
+ rewriter.create<xegpu::PrefetchNdOp>(loc, TypeRange(), t, op->getAttrs());
+
+ rewriter.eraseOp(op);
+ return success();
+ }
+};
+
+struct UnrollLoadNdOp : public UnrollPattern<xegpu::LoadNdOp> {
+ using UnrollPattern<xegpu::LoadNdOp>::UnrollPattern;
+ LogicalResult matchAndRewrite(xegpu::LoadNdOp op,
+ PatternRewriter &rewriter) const override {
+
+ Location loc = op.getLoc();
+ VectorType valueTy = op.getType();
+ xegpu::TensorDescType tdescTy = op.getTensorDescType();
+ ArrayRef<int64_t> shape = tdescTy.getShape();
+
+ std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
+ if (!targetShape || llvm::equal(*targetShape, shape))
+ return failure();
+
+ Type elemTy = tdescTy.getElementType();
+ VectorType newValueTy = valueTy.cloneWith(*targetShape, elemTy);
+
+ SmallVector<Type> convertedTdescTypes =
+ getUnrolledTypes(tdescTy, *targetShape);
+ SmallVector<Value> convertedTdescs = pack(
+ op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
+
+ SmallVector<Value> newOps;
+ for (auto t : convertedTdescs) {
+ auto newOp =
+ rewriter.create<xegpu::LoadNdOp>(loc, newValueTy, t, op->getAttrs());
+ newOps.push_back(newOp);
+ }
+
+ Value castOp = unpack(newOps, op.getType(), *targetShape, loc, rewriter);
+
+ rewriter.replaceOp(op, castOp);
+ return success();
+ }
+};
+
+struct UnrollStoreNdOp : public UnrollPattern<xegpu::StoreNdOp> {
+ using UnrollPattern<xegpu::StoreNdOp>::UnrollPattern;
+ LogicalResult matchAndRewrite(xegpu::StoreNdOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+ VectorType valueTy = op.getValueType();
+ xegpu::TensorDescType tdescTy = op.getTensorDescType();
+ ArrayRef<int64_t> shape = tdescTy.getShape();
+
+ std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
+ if (!targetShape || llvm::equal(*targetShape, shape))
+ return failure();
+
+ SmallVector<Type> convertedValTypes =
+ getUnrolledTypes(valueTy, *targetShape);
+ SmallVector<Type> convertedTdescTypes =
+ getUnrolledTypes(tdescTy, *targetShape);
+
+ SmallVector<Value> convertedValues =
+ pack(op.getValue(), convertedValTypes, *targetShape, loc, rewriter);
+ SmallVector<Value> convertedTdescs = pack(
+ op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
+
+ for (auto [v, t] : llvm::zip(convertedValues, convertedTdescs))
+ rewriter.create<xegpu::StoreNdOp>(loc, v, t, op.getL1HintAttr(),
+ op.getL2HintAttr(), op.getL3HintAttr());
+
+ rewriter.eraseOp(op);
+ return success();
+ }
+};
+
+struct UnrollDpasOp : public UnrollPattern<xegpu::DpasOp> {
+ using UnrollPattern<xegpu::DpasOp>::UnrollPattern;
+ LogicalResult matchAndRewrite(xegpu::DpasOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+
+ // Expect every operand to be a 2D vector.
+ if (llvm::any_of(op->getOperandTypes(), [&](Type type) {
+ auto vecTy = dyn_cast<VectorType>(type);
+ return !vecTy || vecTy.getRank() != 2;
+ }))
+ return failure();
+
+ // A vector of 3 elements should be returned, representing M, K, N
+ // respectively.
+ std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
+ if (!targetShape || targetShape->size() != 3)
+ return failure();
+ auto M = (*targetShape)[0];
+ auto K = (*targetShape)[1];
+ auto N = (*targetShape)[2];
+
+ int64_t aBlockSize[2] = {M, K};
+ int64_t bBlockSize[2] = {K, N};
+ int64_t cBlockSize[2] = {M, N};
+
+ auto packWrapper = [&](TypedValue<VectorType> val,
+ ArrayRef<int64_t> blockSize) {
+ VectorType type = val.getType();
+ std::optional<SmallVector<int64_t>> grids =
+ computeShapeRatio(type.getShape(), blockSize);
+ assert(grids && "Expecting grids to be computed.");
+ auto numNewOps = computeProduct(*grids);
+ if (numNewOps == 1)
+ return SmallVector<Value>({val});
+ VectorType newVecTy = type.cloneWith(blockSize, type.getElementType());
+ SmallVector<Type> convertedTypes(numNewOps, newVecTy);
+ SmallVector<Value> values =
+ pack(val, convertedTypes, blockSize, loc, rewriter);
+ return values;
+ };
+
+ auto a = op.getLhs();
+ auto b = op.getRhs();
+ auto c = op.getAcc();
+
+ auto aShape = a.getType().getShape();
+ auto bShape = b.getType().getShape();
+
+ SmallVector<Value> aVals, bVals, cVals;
+ aVals = packWrapper(a, aBlockSize);
+ bVals = packWrapper(b, bBlockSize);
+
+ if (c)
+ cVals = packWrapper(c, cBlockSize);
+
+ // Skip the operation if any operand has an invalid blocking size (empty)
+ // or if every operand's shape already matches the blocking size (size == 1).
+ auto ranges = c ? SmallVector<ValueRange>({aVals, bVals, cVals})
+ : SmallVector<ValueRange>({aVals, bVals});
+ if (llvm::any_of(ranges, [](auto &v) { return v.size() == 0; }) ||
+ llvm::all_of(ranges, [](auto &v) { return v.size() == 1; }))
+ return failure();
+
+ VectorType resultTy = op.getResult().getType();
+ auto vecTy = VectorType::get(cBlockSize, resultTy.getElementType());
+
+ int64_t mIters = aShape[0] / M;
+ int64_t kIters = aShape[1] / K;
+ int64_t nIters = bShape[1] / N;
+
+ SmallVector<Value> newOps;
+ for (int64_t i = 0; i < mIters; ++i) {
+ for (int64_t j = 0; j < nIters; ++j) {
+ Value tmpC;
+ if (c)
+ tmpC = cVals[i * nIters + j]; // init with acc
+
+ for (int64_t k = 0; k < kIters; ++k) {
+ Value aVec = aVals[i * kIters + k];
+ Value bVec = bVals[k * nIters + j];
+ SmallVector<Value> operands({aVec, bVec});
+ if (tmpC)
+ operands.push_back(tmpC);
+
+ tmpC = rewriter.create<xegpu::DpasOp>(loc, vecTy, operands,
+ op->getAttrs());
+ }
+ newOps.push_back(tmpC);
+ }
+ }
+ Value castOp = unpack(newOps, resultTy, cBlockSize, loc, rewriter);
+ rewriter.replaceOp(op, castOp);
+ return success();
+ }
+};
+
+} // namespace
+
+void mlir::xegpu::populateXeGPUUnrollPatterns(
+ RewritePatternSet &patterns, const xegpu::UnrollOptions &options) {
+ patterns.add<UnrollCreateNdOp, UnrollUpdateNdOffsetOp, UnrollPrefetchNdOp,
+ UnrollLoadNdOp, UnrollStoreNdOp, UnrollDpasOp>(
+ patterns.getContext(), options);
+}
diff --git a/mlir/lib/ExecutionEngine/SyclRuntimeWrappers.cpp b/mlir/lib/ExecutionEngine/SyclRuntimeWrappers.cpp
index c250340..acb5d9d 100644
--- a/mlir/lib/ExecutionEngine/SyclRuntimeWrappers.cpp
+++ b/mlir/lib/ExecutionEngine/SyclRuntimeWrappers.cpp
@@ -65,8 +65,9 @@ static sycl::device getDefaultDevice() {
return syclDevice;
}
throw std::runtime_error("getDefaultDevice failed");
- } else
+ } else {
return syclDevice;
+ }
}
static sycl::context getDefaultContext() {
diff --git a/mlir/lib/IR/AffineMapDetail.h b/mlir/lib/IR/AffineMapDetail.h
index 32c9734..b306462 100644
--- a/mlir/lib/IR/AffineMapDetail.h
+++ b/mlir/lib/IR/AffineMapDetail.h
@@ -24,7 +24,9 @@ namespace detail {
struct AffineMapStorage final
: public StorageUniquer::BaseStorage,
- public llvm::TrailingObjects<AffineMapStorage, AffineExpr> {
+ private llvm::TrailingObjects<AffineMapStorage, AffineExpr> {
+ friend llvm::TrailingObjects<AffineMapStorage, AffineExpr>;
+
/// The hash key used for uniquing.
using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr>>;
@@ -36,7 +38,7 @@ struct AffineMapStorage final
/// The affine expressions for this (multi-dimensional) map.
ArrayRef<AffineExpr> results() const {
- return {getTrailingObjects<AffineExpr>(), numResults};
+ return getTrailingObjects(numResults);
}
bool operator==(const KeyTy &key) const {
@@ -56,7 +58,7 @@ struct AffineMapStorage final
res->numDims = std::get<0>(key);
res->numSymbols = std::get<1>(key);
res->numResults = results.size();
- llvm::uninitialized_copy(results, res->getTrailingObjects<AffineExpr>());
+ llvm::uninitialized_copy(results, res->getTrailingObjects());
return res;
}
};
diff --git a/mlir/lib/IR/Location.cpp b/mlir/lib/IR/Location.cpp
index 8ae3302..f897546 100644
--- a/mlir/lib/IR/Location.cpp
+++ b/mlir/lib/IR/Location.cpp
@@ -34,7 +34,8 @@ using namespace mlir::detail;
namespace mlir::detail {
struct FileLineColRangeAttrStorage final
: public ::mlir::AttributeStorage,
- public llvm::TrailingObjects<FileLineColRangeAttrStorage, unsigned> {
+ private llvm::TrailingObjects<FileLineColRangeAttrStorage, unsigned> {
+ friend llvm::TrailingObjects<FileLineColRangeAttrStorage, unsigned>;
using PointerPair = llvm::PointerIntPair<StringAttr, 2>;
using KeyTy = std::tuple<StringAttr, ::llvm::ArrayRef<unsigned>>;
@@ -62,7 +63,7 @@ struct FileLineColRangeAttrStorage final
result->startLine = elements[0];
// Copy in the element types into the trailing storage.
llvm::uninitialized_copy(elements.drop_front(),
- result->getTrailingObjects<unsigned>());
+ result->getTrailingObjects());
}
return result;
}
@@ -74,12 +75,12 @@ struct FileLineColRangeAttrStorage final
return (filenameAndTrailing.getPointer() == std::get<0>(tblgenKey)) &&
(size() == std::get<1>(tblgenKey).size()) &&
(startLine == std::get<1>(tblgenKey)[0]) &&
- (ArrayRef<unsigned>{getTrailingObjects<unsigned>(), size() - 1} ==
- ArrayRef<unsigned>{std::get<1>(tblgenKey)}.drop_front());
+ (getTrailingObjects(size() - 1) ==
+ std::get<1>(tblgenKey).drop_front());
}
unsigned getLineCols(unsigned index) const {
- return getTrailingObjects<unsigned>()[index - 1];
+ return getTrailingObjects()[index - 1];
}
unsigned getStartLine() const { return startLine; }
diff --git a/mlir/lib/IR/TypeDetail.h b/mlir/lib/IR/TypeDetail.h
index 19f3690..0e952d5 100644
--- a/mlir/lib/IR/TypeDetail.h
+++ b/mlir/lib/IR/TypeDetail.h
@@ -102,7 +102,8 @@ struct FunctionTypeStorage : public TypeStorage {
/// A type representing a collection of other types.
struct TupleTypeStorage final
: public TypeStorage,
- public llvm::TrailingObjects<TupleTypeStorage, Type> {
+ private llvm::TrailingObjects<TupleTypeStorage, Type> {
+ friend llvm::TrailingObjects<TupleTypeStorage, Type>;
using KeyTy = TypeRange;
TupleTypeStorage(unsigned numTypes) : numElements(numTypes) {}
@@ -116,7 +117,7 @@ struct TupleTypeStorage final
auto *result = ::new (rawMem) TupleTypeStorage(key.size());
// Copy in the element types into the trailing storage.
- llvm::uninitialized_copy(key, result->getTrailingObjects<Type>());
+ llvm::uninitialized_copy(key, result->getTrailingObjects());
return result;
}
@@ -126,9 +127,7 @@ struct TupleTypeStorage final
unsigned size() const { return numElements; }
/// Return the held types.
- ArrayRef<Type> getTypes() const {
- return {getTrailingObjects<Type>(), size()};
- }
+ ArrayRef<Type> getTypes() const { return getTrailingObjects(size()); }
KeyTy getAsKey() const { return getTypes(); }
diff --git a/mlir/lib/Target/LLVM/NVVM/Target.cpp b/mlir/lib/Target/LLVM/NVVM/Target.cpp
index 914a349..565f153 100644
--- a/mlir/lib/Target/LLVM/NVVM/Target.cpp
+++ b/mlir/lib/Target/LLVM/NVVM/Target.cpp
@@ -606,9 +606,10 @@ NVPTXSerializer::compileToBinaryNVPTX(const std::string &ptxCode) {
nvPTXCompilerGetErrorLog(compiler, log.data()));
emitError(loc) << "NVPTX compiler invocation failed, error log: "
<< log.data();
- } else
+ } else {
emitError(loc) << "NVPTX compiler invocation failed with error code: "
<< status;
+ }
return std::nullopt;
}
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index 77094d4..7d7d0bb 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -2680,8 +2680,9 @@ ModuleImport::convertParameterAttribute(llvm::AttributeSet llvmParamAttrs,
const llvm::ConstantRange &value = llvmAttr.getValueAsConstantRange();
mlirAttr = builder.getAttr<LLVM::ConstantRangeAttr>(value.getLower(),
value.getUpper());
- } else
+ } else {
llvm_unreachable("unexpected parameter attribute kind");
+ }
paramAttrs.push_back(builder.getNamedAttr(mlirName, mlirAttr));
}
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index ad4f65da..1168b9f 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -296,8 +296,7 @@ translateDataLayout(DataLayoutSpecInterface attribute,
return failure();
}
StringRef layoutSpec(llvmDataLayout);
- if (layoutSpec.starts_with("-"))
- layoutSpec = layoutSpec.drop_front();
+ layoutSpec.consume_front("-");
return llvm::DataLayout(layoutSpec);
}
diff --git a/mlir/lib/Transforms/OpStats.cpp b/mlir/lib/Transforms/OpStats.cpp
index 6746ed5..0dc3fe9 100644
--- a/mlir/lib/Transforms/OpStats.cpp
+++ b/mlir/lib/Transforms/OpStats.cpp
@@ -51,9 +51,9 @@ void PrintOpStatsPass::runOnOperation() {
// Compute the operation statistics for the currently visited operation.
getOperation()->walk(
[&](Operation *op) { ++opCount[op->getName().getStringRef()]; });
- if (printAsJSON) {
+ if (printAsJSON)
printSummaryInJSON();
- } else
+ else
printSummary();
markAllAnalysesPreserved();
}
diff --git a/mlir/python/mlir/dialects/linalg/__init__.py b/mlir/python/mlir/dialects/linalg/__init__.py
index 63586a5..d387c12 100644
--- a/mlir/python/mlir/dialects/linalg/__init__.py
+++ b/mlir/python/mlir/dialects/linalg/__init__.py
@@ -203,6 +203,19 @@ def batch_matmul(
)
+def batch_reduce_matmul(
+ *ins: Union[Operation, OpView, Value],
+ outs: Sequence[Union[Operation, OpView, Value]],
+ indexing_maps: Optional[Sequence[AffineMapAttr]] = None,
+ cast: Optional[Union[TypeFn, Attribute]] = None,
+):
+ return _get_op_result_or_op_results(
+ _create_matmul_like_op(
+ BatchReduceMatmulOp, *ins, outs=outs, indexing_maps=indexing_maps, cast=cast
+ )
+ )
+
+
def contract(
*ins: Union[Operation, OpView, Value],
outs: Sequence[Union[Operation, OpView, Value]],
@@ -216,6 +229,67 @@ def contract(
)
+# Extend and shadow the TableGen-derived version to make sure correct default
+# indexing_maps are derived (as there is no mechanism for doing so given the
+# Python API bypasses the C++-builders).
+class ElementwiseOp_(ElementwiseOp):
+ def __init__(
+ self,
+ result_tensors,
+ inputs,
+ outputs,
+ kind,
+ *,
+ indexing_maps=None,
+ loc=None,
+ ip=None,
+ ):
+ if indexing_maps is None:
+ inputs = [_get_op_result_or_value(in_) for in_ in inputs]
+ for in0, in1 in zip(inputs[:-1], inputs[1:]):
+ assert in0.type == in1.type
+ output = _get_op_result_or_value(outputs[0])
+ assert inputs[0].type == output.type
+ num_args = len(inputs) + 1
+ indexing_maps = [AffineMap.get_identity(output.type.rank)] * num_args
+
+ super().__init__(
+ result_tensors=result_tensors,
+ inputs=inputs,
+ outputs=outputs,
+ kind=kind,
+ indexing_maps=indexing_maps,
+ loc=loc,
+ ip=ip,
+ )
+
+
+ElementwiseOp = ElementwiseOp_
+
+
+def elementwise(
+ *ins: Union[Operation, OpView, Value],
+ outs: Sequence[Union[Operation, OpView, Value]],
+ kind: Union[ElementwiseKind, Attribute],
+ indexing_maps: Optional[Sequence[AffineMapAttr]] = None,
+):
+ ins = [_get_op_result_or_value(input) for input in ins]
+ if len(outs) != 1:
+ raise ValueError(f"{outs=} must have length 1.")
+ init = _get_op_result_or_value(outs[0])
+ result_types = [init.type] if isinstance(init.type, RankedTensorType) else []
+
+ op = ElementwiseOp(
+ result_tensors=result_types,
+ inputs=ins,
+ outputs=[init],
+ kind=kind,
+ indexing_maps=indexing_maps,
+ )
+ fill_builtin_region(op.operation)
+ return _get_op_result_or_op_results(op)
+
+
def pack(
source,
dest,
diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir
index d391549..e842563 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir
@@ -578,6 +578,15 @@ func.func @st_bulk(%addr_gen: !llvm.ptr, %addr_shared: !llvm.ptr<3>, %size: i64)
return
}
+// CHECK-LABEL: @dot_accumulate_4way
+func.func @dot_accumulate_4way(%a: i32, %a_vec: vector<4xi8>, %b: i32, %b_vec: vector<4xi8>, %c: i32) {
+ // CHECK: nvvm.dot.accumulate.4way %{{.*}}, %{{.*}}, %{{.*}} : vector<4xi8>, vector<4xi8>
+ %1 = nvvm.dot.accumulate.4way %a_vec <u8>, %b_vec <u8>, %c: vector<4xi8>, vector<4xi8>
+ // CHECK: nvvm.dot.accumulate.4way %{{.*}}, %{{.*}}, %{{.*}} : vector<4xi8>, vector<4xi8>
+ %3 = nvvm.dot.accumulate.4way %a_vec <s8>, %b_vec <s8>, %c: vector<4xi8>, vector<4xi8>
+ return
+}
+
// -----
// Just check these don't emit errors.
diff --git a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
index 0ec71c35..ae07b1b 100644
--- a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
@@ -1024,6 +1024,34 @@ func.func @batch_matmul(%arg0: tensor<2x3x5xf32>, %arg1: tensor<2x5x7xf32>, %arg
// -----
+// CHECK: #[[$ACCESS_A:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[$ACCESS_B:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
+// CHECK: #[[$ACCESS_C:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @batch_reduce_matmul(
+// CHECK-SAME: %[[A:.*]]: tensor<2x3x5xf32>,
+// CHECK-SAME: %[[B:.*]]: tensor<2x5x7xf32>,
+// CHECK-SAME: %[[C:.*]]: tensor<3x7xf32>) -> tensor<3x7xf32> {
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[$ACCESS_A]], #[[$ACCESS_B]], #[[$ACCESS_C]]],
+// CHECK-SAME: iterator_types = ["reduction", "parallel", "parallel", "reduction"]}
+// CHECK: arith.mulf
+// CHECK: arith.addf
+// CHECK: linalg.yield
+
+func.func @batch_reduce_matmul(%A: tensor<2x3x5xf32>, %B: tensor<2x5x7xf32>, %C: tensor<3x7xf32>) -> tensor<3x7xf32> {
+ %0 = linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B: tensor<2x3x5xf32>, tensor<2x5x7xf32>)
+ outs(%C: tensor<3x7xf32>) -> tensor<3x7xf32>
+ return %0 : tensor<3x7xf32>
+}
+
+// -----
+
// CHECK: #[[$ACCESS_A:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK: #[[$ACCESS_B:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK: #[[$ACCESS_C:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 79f2a0d..c0c5f78 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -1364,10 +1364,10 @@ func.func @invalid_bcast_batch_matmul_a(%arg0: memref<?xf32>, %arg1: memref<?x?x
// -----
-func.func @invalid_multi_dim_bcast_expr_batch_matmul_a(%arg0: memref<?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?x?xf32>) {
+func.func @invalid_single_dim_bcast_expr_batch_matmul_a(%arg0: memref<?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?x?xf32>) {
// expected-error @+1 {{'linalg.batch_matmul' op Invalid broadcast requested}}
linalg.batch_matmul indexing_maps = [
- affine_map<(d0, d1, d2, d3) -> (d0, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d3, d0)>,
affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
]
@@ -1377,14 +1377,14 @@ func.func @invalid_multi_dim_bcast_expr_batch_matmul_a(%arg0: memref<?x?xf32>, %
// -----
-func.func @invalid_multi_dim_bcast_expr_batch_matmul_b(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?x?xf32>) {
+func.func @invalid_single_dim_bcast_expr_batch_matmul_B(%A: memref<?x?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?x?xf32>) {
// expected-error @+1 {{'linalg.batch_matmul' op Invalid broadcast requested}}
linalg.batch_matmul indexing_maps = [
affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>,
affine_map<(d0, d1, d2, d3) -> (d3, d0)>,
affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
]
- ins(%arg0, %arg1 : memref<?x?x?xf32>, memref<?x?xf32>) outs(%arg2: memref<?x?x?xf32>)
+ ins(%A, %B : memref<?x?x?xf32>, memref<?x?xf32>) outs(%C: memref<?x?x?xf32>)
return
}
@@ -1487,6 +1487,205 @@ func.func @invalid_C_map_result_dim_batch_matmul(%arg0: memref<?x?x?xf32>, %arg1
// -----
//===----------------------------------------------------------------------===//
+// linalg.batch_reduce_matmul
+//===----------------------------------------------------------------------===//
+
+func.func @missing_one_indexing_map(%arg0: memref<?x?x?xf32>,
+ %arg1: memref<?x?x?xf32>, %arg2: memref<?x?xf32>) {
+ // expected-error @+1 {{Indexing_map attribute must have 3 affine maps}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (batch, n, k)>]
+ ins(%arg0, %arg1 : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%arg2: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @missing_two_indexing_map(%arg0: memref<?x?x?xf32>,
+ %arg1: memref<?x?x?xf32>, %arg2: memref<?x?xf32>) {
+ // expected-error @+1 {{Indexing_map attribute must have 3 affine maps}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>]
+ ins(%arg0, %arg1 : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%arg2: memref<?x?xf32>)
+ return
+
+}
+
+// -----
+
+func.func @missing_indexing_map(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?xf32>) {
+ // expected-error @+1 {{expected attribute value}}
+ linalg.batch_reduce_matmul indexing_maps = [
+ ,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%arg0, %arg1 : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%arg2 :memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_dim_expr_A(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Unexpected result dim expression (outside the set of default result dims)}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, n, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C :memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_dim_expr_B(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Unexpected result dim expression (outside the set of default result dims)}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, m)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C :memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_bcast_A(%A: memref<?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid broadcast requested}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?xf32>, memref<?x?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_multi_dim_bcast_expr_A(%A: memref<?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid broadcast requested}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (k, batch)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?x?xf32>, memref<?x?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_multi_dim_bcast_expr_B(%A: memref<?x?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid broadcast requested}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (k, batch)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?x?x?xf32>, memref<?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_bcast_B(%A: memref<?x?x?xf32>, %B: memref<?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid broadcast requested}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (n)>,
+ affine_map<(batch, m, n, k) -> (batch, m, n)>]
+ ins(%A, %B : memref<?x?x?xf32>, memref<?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_batch_dim_A(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid batch dimension expression}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (m, batch, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C :memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_batch_dim_B(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid batch dimension expression}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (n, k, batch)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B : memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C :memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_A_map_result_num(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{no. of result dim expressions exceeds 3.}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_B_map_result_num(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{no. of result dim expressions exceeds 3.}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n, k)>,
+ affine_map<(batch, m, n, k) -> (m, n)>]
+ ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_C_map_result_num(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{expects 2 dims, but got (1).}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m)>]
+ ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+func.func @invalid_C_map_result_dim(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?xf32>) {
+ // expected-error @+1 {{Invalid output map result dimension.}}
+ linalg.batch_reduce_matmul
+ indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
+ affine_map<(batch, m, n, k) -> (batch, k, n)>,
+ affine_map<(batch, m, n, k) -> (m, k)>]
+ ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
+ outs(%C: memref<?x?xf32>)
+ return
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
// linalg.pack
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/Linalg/named-ops.mlir b/mlir/test/Dialect/Linalg/named-ops.mlir
index 1bd9c88..470bc1c 100644
--- a/mlir/test/Dialect/Linalg/named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/named-ops.mlir
@@ -1637,6 +1637,175 @@ func.func @batch_matmul_bcast_A_transpose_B(%arg0: memref<3x5xf32>, %arg1: memre
// -----
+//===----------------------------------------------------------------------===//
+// linalg.batch_reduce_matmul
+//===----------------------------------------------------------------------===//
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @bcast_k_to_fill_missing_dims_A(
+// CHECK-SAME: %[[A:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<2x5x7xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<5xf32>, memref<2x5x7xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+
+func.func @bcast_k_to_fill_missing_dims_A(%A: memref<5xf32>, %B: memref<2x5x7xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<5xf32>, memref<2x5x7xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @bcast_batch_dim_A(
+// CHECK-SAME: %[[A:.*]]: memref<3x5xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<2x5x7xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<3x5xf32>, memref<2x5x7xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+
+func.func @bcast_batch_dim_A(%A: memref<3x5xf32>, %B: memref<2x5x7xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d1, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<3x5xf32>, memref<2x5x7xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @bcast_batch_and_n_dim_B(
+// CHECK-SAME: %[[A:.*]]: memref<2x3x5xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<5xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<2x3x5xf32>, memref<5xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+
+func.func @bcast_batch_and_n_dim_B(%A: memref<2x3x5xf32>, %B: memref<5xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<2x3x5xf32>, memref<5xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @bcast_batch_dim_B(
+// CHECK-SAME: %[[A:.*]]: memref<2x3x5xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<5x7xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<2x3x5xf32>, memref<5x7xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+
+func.func @bcast_batch_dim_B(%A: memref<2x3x5xf32>, %B: memref<5x7xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<2x3x5xf32>, memref<5x7xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @explicit_transpose_A(
+// CHECK-SAME: %[[A:.*]]: memref<2x5x3xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<2x5x7xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<2x5x3xf32>, memref<2x5x7xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+func.func @explicit_transpose_A(%A: memref<2x5x3xf32>, %B: memref<2x5x7xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
+ affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<2x5x3xf32>, memref<2x5x7xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @explicit_transpose_B(
+// CHECK-SAME: %[[A:.*]]: memref<2x3x5xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<2x7x5xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<2x3x5xf32>, memref<2x7x5xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+func.func @explicit_transpose_B(%A: memref<2x3x5xf32>, %B: memref<2x7x5xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<2x3x5xf32>, memref<2x7x5xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
+// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
+// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+
+// CHECK-LABEL: func.func @bcast_A_transpose_B(
+// CHECK-SAME: %[[A:.*]]: memref<3x5xf32>,
+// CHECK-SAME: %[[B:.*]]: memref<2x7x5xf32>,
+// CHECK-SAME: %[[C:.*]]: memref<3x7xf32>) {
+// CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]] ins(%[[A]], %[[B]] : memref<3x5xf32>, memref<2x7x5xf32>) outs(%[[C]] : memref<3x7xf32>)
+// CHECK: return
+// CHECK: }
+func.func @bcast_A_transpose_B(%A: memref<3x5xf32>, %B: memref<2x7x5xf32>, %C: memref<3x7xf32>) {
+ linalg.batch_reduce_matmul indexing_maps = [
+ affine_map<(d0, d1, d2, d3) -> (d1, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>,
+ affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ ]
+ ins(%A, %B : memref<3x5xf32>, memref<2x7x5xf32>) outs(%C: memref<3x7xf32>)
+ return
+}
+
+// -----
+
// CHECK-LABEL: func @batchmatmul_transpose_a
// CHECK: linalg.batch_matmul_transpose_a
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<2x5x3xf32>, memref<2x5x7xf32>)
diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir
index c18bd61..5e32a3a 100644
--- a/mlir/test/Dialect/SCF/canonicalize.mlir
+++ b/mlir/test/Dialect/SCF/canonicalize.mlir
@@ -149,7 +149,7 @@ func.func @one_unused(%cond: i1) -> (index) {
// CHECK: call @side_effect() : () -> ()
// CHECK: [[C1:%.*]] = "test.value1"
// CHECK: scf.yield [[C1]] : index
-// CHECK: } else
+// CHECK: } else {
// CHECK: [[C3:%.*]] = "test.value3"
// CHECK: scf.yield [[C3]] : index
// CHECK: }
@@ -185,12 +185,12 @@ func.func @nested_unused(%cond1: i1, %cond2: i1) -> (index) {
// CHECK: call @side_effect() : () -> ()
// CHECK: [[C1:%.*]] = "test.value1"
// CHECK: scf.yield [[C1]] : index
-// CHECK: } else
+// CHECK: } else {
// CHECK: [[C3:%.*]] = "test.value3"
// CHECK: scf.yield [[C3]] : index
// CHECK: }
// CHECK: scf.yield [[V1]] : index
-// CHECK: } else
+// CHECK: } else {
// CHECK: [[C1_2:%.*]] = "test.value1_2"
// CHECK: scf.yield [[C1_2]] : index
// CHECK: }
@@ -215,7 +215,7 @@ func.func @all_unused(%cond: i1) {
// CHECK-LABEL: func @all_unused
// CHECK: scf.if %{{.*}} {
// CHECK: call @side_effect() : () -> ()
-// CHECK: } else
+// CHECK: } else {
// CHECK: call @side_effect() : () -> ()
// CHECK: }
// CHECK: return
diff --git a/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir b/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir
new file mode 100644
index 0000000..b911bb3
--- /dev/null
+++ b/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir
@@ -0,0 +1,161 @@
+// RUN: mlir-opt --test-xegpu-unrolling-patterns -split-input-file %s | FileCheck %s
+
+gpu.module @test {
+
+ // CHECK-LABEL: test_create_nd_tdesc
+ // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32>
+ // CHECK-COUNT-6: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+ // CHECK: [[cast:%.+]] = builtin.unrealized_conversion_cast
+ // CHECK-SAME: !xegpu.tensor_desc<8x16xf32>, !xegpu.tensor_desc<8x16xf32>,
+ // CHECK-SAME: !xegpu.tensor_desc<8x16xf32>, !xegpu.tensor_desc<8x16xf32>,
+ // CHECK-SAME: !xegpu.tensor_desc<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
+ // CHECK-SAME: to !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> {__xegpu_blocking_tile_shape__ = array<i64: 8, 16>, __xegpu_blocking_unpack__}
+ gpu.func @test_create_nd_tdesc(%src: memref<24x32xf32>) -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> {
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ gpu.return %tdesc : !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_create_nd_tdesc_1d
+ // CHECK-SAME: [[arg0:%.+]]: memref<64xf32>
+ // CHECK-COUNT-2: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<64xf32> -> !xegpu.tensor_desc<16xf32>
+ // CHECK: [[cast:%.+]] = builtin.unrealized_conversion_cast
+ // CHECK-SAME: !xegpu.tensor_desc<16xf32>, !xegpu.tensor_desc<16xf32>
+ // CHECK-SAME: to !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>> {__xegpu_blocking_tile_shape__ = array<i64: 16>, __xegpu_blocking_unpack__}
+ gpu.func @test_create_nd_tdesc_1d(%src: memref<64xf32>) -> !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>> {
+ %tdesc = xegpu.create_nd_tdesc %src[0] : memref<64xf32> -> !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>
+ gpu.return %tdesc : !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_update_nd_tdesc
+ // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32>
+ // CHECK-COUNT-6: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+ // CHECK-COUNT-6: [[update:%.+]] = xegpu.update_nd_offset {{.*}} : !xegpu.tensor_desc<8x16xf32>
+ gpu.func @test_update_nd_tdesc(%src: memref<24x32xf32>) -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> {
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ %update = xegpu.update_nd_offset %tdesc, [0, 16] : !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ gpu.return %update : !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_update_nd_tdesc_1d
+ // CHECK-SAME: [[arg0:%.+]]: memref<64xf32>
+ // CHECK-COUNT-2: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<64xf32> -> !xegpu.tensor_desc<16xf32>
+ // CHECK-COUNT-2: [[update:%.+]] = xegpu.update_nd_offset {{.*}} : !xegpu.tensor_desc<16xf32>
+ gpu.func @test_update_nd_tdesc_1d(%src: memref<64xf32>) -> !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>> {
+ %tdesc = xegpu.create_nd_tdesc %src[0] : memref<64xf32> -> !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>
+ %update = xegpu.update_nd_offset %tdesc, [32] : !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>
+ gpu.return %update : !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_prefetch_nd_tdesc
+ // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32>
+ // CHECK-COUNT-6: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+ // CHECK-COUNT-6: xegpu.prefetch_nd {{.*}} : !xegpu.tensor_desc<8x16xf32>
+ gpu.func @test_prefetch_nd_tdesc(%src: memref<24x32xf32>) {
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ xegpu.prefetch_nd %tdesc : !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ gpu.return
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_prefetch_nd_tdesc_1d
+ // CHECK-SAME: [[arg0:%.+]]: memref<64xf32>
+ // CHECK-COUNT-4: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<64xf32> -> !xegpu.tensor_desc<16xf32>
+ // CHECK-COUNT-4: xegpu.prefetch_nd {{.*}} : !xegpu.tensor_desc<16xf32>
+ gpu.func @test_prefetch_nd_tdesc_1d(%src: memref<64xf32>) {
+ %tdesc = xegpu.create_nd_tdesc %src[0] : memref<64xf32> -> !xegpu.tensor_desc<64xf32, #xegpu.layout<inst_data = [16]>>
+ xegpu.prefetch_nd %tdesc : !xegpu.tensor_desc<64xf32, #xegpu.layout<inst_data = [16]>>
+ gpu.return
+ }
+
+ //-----
+ // CHECK-LABEL: test_load_nd
+ // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32>
+ // CHECK-COUNT-6: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+ // CHECK-COUNT-6: [[ld:%.+]] = xegpu.load_nd {{.*}} : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>
+ // CHECK-COUNT-6: [[insert:%.+]] = vector.insert_strided_slice {{.*}} : vector<8x16xf32> into vector<24x32xf32>
+ gpu.func @test_load_nd(%src: memref<24x32xf32>) -> vector<24x32xf32> {
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ %ld = xegpu.load_nd %tdesc: !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> -> vector<24x32xf32>
+ gpu.return %ld : vector<24x32xf32>
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_load_nd_1d
+ // CHECK-SAME: [[arg0:%.+]]: memref<64xf32>
+ // CHECK-COUNT-4: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<64xf32> -> !xegpu.tensor_desc<16xf32>
+ // CHECK-COUNT-4: [[ld:%.+]] = xegpu.load_nd {{.*}} : !xegpu.tensor_desc<16xf32> -> vector<16xf32>
+ // CHECK-COUNT-4: [[insert:%.+]] = vector.insert_strided_slice {{.*}} : vector<16xf32> into vector<64xf32>
+ gpu.func @test_load_nd_1d(%src: memref<64xf32>) -> vector<64xf32> {
+ %tdesc = xegpu.create_nd_tdesc %src[0] : memref<64xf32> -> !xegpu.tensor_desc<64xf32, #xegpu.layout<inst_data = [16]>>
+ %data = xegpu.load_nd %tdesc: !xegpu.tensor_desc<64xf32, #xegpu.layout<inst_data = [16]>> -> vector<64xf32>
+ gpu.return %data : vector<64xf32>
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_store_nd
+ // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32>
+ // CHECK-COUNT-6: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+ // CHECK-COUNT-6: xegpu.store_nd {{.*}} : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
+ gpu.func @test_store_nd(%src: memref<24x32xf32>) {
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ %data = arith.constant dense<9.0> : vector<24x32xf32>
+ xegpu.store_nd %data, %tdesc: vector<24x32xf32>, !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ gpu.return
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_store_nd_1d
+ // CHECK-SAME: [[arg0:%.+]]: memref<64xf32>
+ // CHECK-COUNT-4: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<64xf32> -> !xegpu.tensor_desc<16xf32>
+ // CHECK-COUNT-4: xegpu.store_nd {{.*}} : vector<16xf32>, !xegpu.tensor_desc<16xf32>
+ gpu.func @test_store_nd_1d(%src: memref<64xf32>) {
+ %tdesc = xegpu.create_nd_tdesc %src[0] : memref<64xf32> -> !xegpu.tensor_desc<64xf32, #xegpu.layout<inst_data = [16]>>
+ %data = arith.constant dense<9.0> : vector<64xf32>
+ xegpu.store_nd %data, %tdesc: vector<64xf32>, !xegpu.tensor_desc<64xf32, #xegpu.layout<inst_data = [16]>>
+ gpu.return
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_createNd_loadNd_storeNd
+ // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32>
+ //CHECK-COUNT-6: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+ //CHECK-COUNT-6: [[data:%.+]] = xegpu.load_nd {{.*}} : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>
+ //CHECK-COUNT-6: [[insert:%.+]] = vector.insert_strided_slice {{.*}} : vector<8x16xf32> into vector<24x32xf32>
+ //CHECK: [[add:%.+]] = arith.addf {{.*}} : vector<24x32xf32>
+ //CHECK-COUNT-6: [[extract:%.+]] = vector.extract_strided_slice {{.*}} : vector<24x32xf32> to vector<8x16xf32>
+ //CHECK-COUNT-6: xegpu.store_nd {{.*}} : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
+ gpu.func @test_createNd_loadNd_storeNd(%src: memref<24x32xf32>) {
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ %data = arith.constant dense<9.0> : vector<24x32xf32>
+ %ld = xegpu.load_nd %tdesc: !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> -> vector<24x32xf32>
+ %add = arith.addf %data, %ld : vector<24x32xf32>
+ xegpu.store_nd %add, %tdesc: vector<24x32xf32>, !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
+ gpu.return
+ }
+
+ //-----
+
+ // CHECK-LABEL: test_dpas
+ // CHECK-SAME: [[arg0:%.+]]: vector<32x32xf16>, [[arg1:%.+]]: vector<32x32xf16>
+ //CHECK-COUNT-8: [[extract1:%.+]] = vector.extract_strided_slice [[arg0]] {{.*}} : vector<32x32xf16> to vector<8x16xf16>
+ //CHECK-COUNT-4: [[extract2:%.+]] = vector.extract_strided_slice [[arg1]] {{.*}} : vector<32x32xf16> to vector<16x16xf16>
+ //CHECK-COUNT-16: [[dpas:%.+]] = xegpu.dpas {{.*}} -> vector<8x16xf32>
+ //CHECK-COUNT-8: [[insert:%.+]] = vector.insert_strided_slice {{.*}} : vector<8x16xf32> into vector<32x32xf32>
+ gpu.func @test_dpas(%a: vector<32x32xf16>, %b: vector<32x32xf16>) -> vector<32x32xf32> {
+ %c = xegpu.dpas %a, %b : vector<32x32xf16>, vector<32x32xf16> -> vector<32x32xf32>
+ gpu.return %c : vector<32x32xf32>
+ }
+}
diff --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir
index 3a0713f..894b727 100644
--- a/mlir/test/Target/LLVMIR/nvvmir.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir.mlir
@@ -844,3 +844,25 @@ llvm.func @nvvm_st_bulk(%addr_gen: !llvm.ptr, %addr_shared: !llvm.ptr<3>, %size:
nvvm.st.bulk %addr_shared, size = %size, init = 0: !llvm.ptr<3>
llvm.return
}
+
+// -----
+// CHECK-LABEL: @nvvm_dot_accumulate_4way
+llvm.func @nvvm_dot_accumulate_4way(%a: vector<4xi8>, %b: vector<4xi8>, %c: i32) {
+ // CHECK: %[[a_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: %[[b_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: call i32 @llvm.nvvm.idp4a.u.u(i32 %[[a_cast]], i32 %[[b_cast]], i32 %{{.*}})
+ %0 = nvvm.dot.accumulate.4way %a <u8>, %b <u8>, %c: vector<4xi8>, vector<4xi8>
+ // CHECK: %[[a_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: %[[b_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: call i32 @llvm.nvvm.idp4a.s.u(i32 %[[a_cast]], i32 %[[b_cast]], i32 %{{.*}})
+ %1 = nvvm.dot.accumulate.4way %a <s8>, %b <u8>, %c: vector<4xi8>, vector<4xi8>
+ // CHECK: %[[a_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: %[[b_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: call i32 @llvm.nvvm.idp4a.u.s(i32 %[[a_cast]], i32 %[[b_cast]], i32 %{{.*}})
+ %2 = nvvm.dot.accumulate.4way %a <u8>, %b <s8>, %c: vector<4xi8>, vector<4xi8>
+ // CHECK: %[[a_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: %[[b_cast:.*]] = bitcast <4 x i8> %{{.*}} to i32
+ // CHECK: call i32 @llvm.nvvm.idp4a.s.s(i32 %[[a_cast]], i32 %[[b_cast]], i32 %{{.*}})
+ %3 = nvvm.dot.accumulate.4way %a <s8>, %b <s8>, %c: vector<4xi8>, vector<4xi8>
+ llvm.return
+}
diff --git a/mlir/test/lib/Dialect/CMakeLists.txt b/mlir/test/lib/Dialect/CMakeLists.txt
index 29fb444..a8fd70e 100644
--- a/mlir/test/lib/Dialect/CMakeLists.txt
+++ b/mlir/test/lib/Dialect/CMakeLists.txt
@@ -22,3 +22,4 @@ add_subdirectory(TestDyn)
add_subdirectory(Tosa)
add_subdirectory(Transform)
add_subdirectory(Vector)
+add_subdirectory(XeGPU)
diff --git a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
index 454a12b..b5a8bd1 100644
--- a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
+++ b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
@@ -801,8 +801,9 @@ void TestReflectBoundsOp::inferResultRanges(
unsigned bitwidth = intTy.getWidth();
sIntTy = b.getIntegerType(bitwidth, /*isSigned=*/true);
uIntTy = b.getIntegerType(bitwidth, /*isSigned=*/false);
- } else
+ } else {
sIntTy = uIntTy = type;
+ }
setUminAttr(b.getIntegerAttr(uIntTy, range.umin()));
setUmaxAttr(b.getIntegerAttr(uIntTy, range.umax()));
diff --git a/mlir/test/lib/Dialect/XeGPU/CMakeLists.txt b/mlir/test/lib/Dialect/XeGPU/CMakeLists.txt
new file mode 100644
index 0000000..5236d87
--- /dev/null
+++ b/mlir/test/lib/Dialect/XeGPU/CMakeLists.txt
@@ -0,0 +1,16 @@
+add_mlir_dialect_library(MLIRXeGPUTestPasses
+ TestXeGPUTransforms.cpp
+
+ EXCLUDE_FROM_LIBMLIR
+)
+
+mlir_target_link_libraries(MLIRXeGPUTestPasses PUBLIC
+ MLIRAffineUtils
+ MLIRIR
+ MLIRMemRefDialect
+ MLIRXeGPUDialect
+ MLIRPass
+ MLIRTransforms
+ MLIRGPUDialect
+ MLIRXeGPUTransforms
+)
diff --git a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
new file mode 100644
index 0000000..eaa3b98
--- /dev/null
+++ b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
@@ -0,0 +1,124 @@
+//===- TestXeGPUTransforms.cpp -- Test Vector transforms and lowerings ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/GPU/IR/GPUDialect.h"
+#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
+#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
+#include "mlir/Dialect/XeGPU/Transforms/Transforms.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+using namespace mlir;
+using namespace mlir::xegpu;
+
+namespace {
+
+struct TestXeGPUUnrollingPatterns
+ : public PassWrapper<TestXeGPUUnrollingPatterns,
+ OperationPass<gpu::GPUModuleOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestXeGPUUnrollingPatterns)
+
+ StringRef getArgument() const final {
+ return "test-xegpu-unrolling-patterns";
+ }
+
+ StringRef getDescription() const final {
+ return "Test lowering patterns to unroll ops in the xegpu dialect";
+ }
+
+ void getDependentDialects(::mlir::DialectRegistry &registry) const override {
+ registry.insert<memref::MemRefDialect>();
+ registry.insert<xegpu::XeGPUDialect>();
+ registry.insert<vector::VectorDialect>();
+ }
+
+ TestXeGPUUnrollingPatterns() = default;
+ TestXeGPUUnrollingPatterns(const TestXeGPUUnrollingPatterns &pass)
+ : PassWrapper(pass) {}
+
+ void runOnOperation() override {
+ MLIRContext *ctx = &getContext();
+ xegpu::UnrollOptions options;
+ options.setNativeShapeFn(
+ [&](Operation *op) -> std::optional<SmallVector<int64_t>> {
+ if (isa<xegpu::CreateNdDescOp, xegpu::UpdateNdOffsetOp,
+ xegpu::PrefetchNdOp, xegpu::LoadNdOp, xegpu::StoreNdOp>(op)) {
+ xegpu::TensorDescType tdescTy;
+ if (auto createNdOp = dyn_cast<xegpu::CreateNdDescOp>(op)) {
+ tdescTy = createNdOp.getType();
+ } else if (auto updateNdOp =
+ dyn_cast<xegpu::UpdateNdOffsetOp>(op)) {
+ tdescTy = updateNdOp.getTensorDescType();
+ } else if (auto prefetchNdOp = dyn_cast<xegpu::PrefetchNdOp>(op)) {
+ tdescTy = prefetchNdOp.getTensorDescType();
+ } else if (auto loadNdOp = dyn_cast<xegpu::LoadNdOp>(op)) {
+ tdescTy = loadNdOp.getTensorDescType();
+ } else if (auto storeNdOp = dyn_cast<xegpu::StoreNdOp>(op)) {
+ tdescTy = storeNdOp.getTensorDescType();
+ }
+
+ if (auto layout = tdescTy.getLayoutAttr()) {
+ auto inst_data = layout.getInstData();
+ if (inst_data && layout.isSgLayout())
+ return SmallVector<int64_t>(inst_data.asArrayRef().begin(),
+ inst_data.asArrayRef().end());
+ }
+ }
+
+ if (isa<xegpu::DpasOp>(op))
+ return SmallVector<int64_t>{8, 16, 16};
+
+ return std::nullopt;
+ });
+
+ options.setUnrolledTypesFn(
+ [&](ShapedType type, ArrayRef<int64_t> tileShape) -> SmallVector<Type> {
+ Type elemTy = type.getElementType();
+ Type newTy;
+
+ // TensorDescType needs to drop the inst_data field in the layout
+ // attribute
+ if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(type)) {
+ Attribute encoding = tdescTy.getEncoding();
+ auto layout = llvm::dyn_cast_if_present<xegpu::LayoutAttr>(
+ tdescTy.getLayout());
+ if (layout) {
+ if (layout.getLaneLayout() == nullptr)
+ layout = xegpu::LayoutAttr();
+ else
+ layout = layout.dropInstData();
+ }
+ newTy = xegpu::TensorDescType::get(ctx, tileShape, elemTy, encoding,
+ layout);
+ } else {
+ newTy = type.clone(tileShape, elemTy);
+ }
+
+ std::optional<SmallVector<int64_t>> ratio =
+ computeShapeRatio(type.getShape(), tileShape);
+ assert(ratio && "Expecting the ratio to be valid.");
+ return SmallVector<Type>(computeProduct(*ratio), newTy);
+ });
+
+ RewritePatternSet patterns(ctx);
+
+ populateXeGPUUnrollPatterns(patterns, options);
+ (void)applyPatternsGreedily(getOperation(), std::move(patterns));
+ }
+};
+
+} // namespace
+
+namespace mlir {
+namespace test {
+void registerTestXeGPULowerings() {
+ PassRegistration<TestXeGPUUnrollingPatterns>();
+}
+} // namespace test
+} // namespace mlir \ No newline at end of file
diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py
index e32a911..f1e2afa 100644
--- a/mlir/test/python/dialects/linalg/ops.py
+++ b/mlir/test/python/dialects/linalg/ops.py
@@ -568,6 +568,107 @@ def testBatchMatmulOp():
print(module)
+# CHECK-LABEL: TEST: testBatchReduceMatmulOp
+@run
+def testBatchReduceMatmulOp():
+ with Context(), Location.unknown():
+ module = Module.create()
+ f32 = F32Type.get()
+ with InsertionPoint(module.body):
+ a_shape = (5, 4, 8)
+ b_shape = (5, 8, 12)
+ b_transposed_shape = (5, 12, 8)
+ c_shape = (4, 12)
+
+ dimBatch = ir.AffineDimExpr.get(0)
+ dimM = ir.AffineDimExpr.get(1)
+ dimN = ir.AffineDimExpr.get(2)
+ dimK = ir.AffineDimExpr.get(3)
+
+ # CHECK: #[[$A_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+ # CHECK: #[[$BTrans_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
+ # CHECK: #[[$C_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+ a_map = ir.AffineMap.get(4, 0, [dimBatch, dimM, dimK])
+ b_transposed_map = ir.AffineMap.get(4, 0, [dimBatch, dimN, dimK])
+ c_map = ir.AffineMap.get(4, 0, [dimM, dimN])
+
+ # CHECK: func.func @batch_reduce_matmul_op(
+ @func.FuncOp.from_py_func(
+ # CHECK-SAME: %[[A:.*]]: tensor<5x4x8xf32>,
+ RankedTensorType.get(a_shape, f32),
+ # CHECK-SAME: %[[Amem:.*]]: memref<5x4x8xf32>,
+ MemRefType.get(a_shape, f32),
+ # CHECK-SAME: %[[B:.*]]: tensor<5x8x12xf32>,
+ RankedTensorType.get(b_shape, f32),
+ # CHECK-SAME: %[[Bmem:.*]]: memref<5x8x12xf32>,
+ MemRefType.get(b_shape, f32),
+ # CHECK-SAME: %[[BTrans:.*]]: tensor<5x12x8xf32>,
+ RankedTensorType.get(b_transposed_shape, f32),
+ # CHECK-SAME: %[[BTransmem:.*]]: memref<5x12x8xf32>,
+ MemRefType.get(b_transposed_shape, f32),
+ # CHECK-SAME: %[[C:.*]]: tensor<4x12xf32>,
+ RankedTensorType.get(c_shape, f32),
+ # CHECK-SAME: %[[Cmem:.*]]: memref<4x12xf32>)
+ MemRefType.get(c_shape, f32),
+ )
+ def batch_reduce_matmul_op(
+ A, Amem, B, Bmem, Btransposed, Btransposedmem, C, Cmem
+ ):
+ # CHECK: linalg.batch_reduce_matmul ins(%[[A]], %[[B]] : tensor<5x4x8xf32>, tensor<5x8x12xf32>) outs(%[[C]] : tensor<4x12xf32>)
+ res = linalg.BatchReduceMatmulOp(
+ result_tensors=(C.type,),
+ inputs=(A, B),
+ outputs=(C,),
+ )
+ linalg.fill_builtin_region(res.operation)
+ # CHECK: linalg.batch_reduce_matmul ins(%[[A]], %[[B]] : tensor<5x4x8xf32>, tensor<5x8x12xf32>) outs(%[[C]] : tensor<4x12xf32>)
+ res = linalg.batch_reduce_matmul(A, B, outs=(C,))
+
+ # CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$A_MAP]], #[[$BTrans_MAP]], #[[$C_MAP]]] ins(%[[A]], %[[BTrans]] : tensor<5x4x8xf32>, tensor<5x12x8xf32>) outs(%[[C]] : tensor<4x12xf32>)
+ res = linalg.BatchReduceMatmulOp(
+ result_tensors=(C.type,),
+ inputs=(A, Btransposed),
+ outputs=(C,),
+ indexing_maps=[a_map, b_transposed_map, c_map],
+ )
+ linalg.fill_builtin_region(res.operation)
+ # CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$A_MAP]], #[[$BTrans_MAP]], #[[$C_MAP]]] ins(%[[A]], %[[BTrans]] : tensor<5x4x8xf32>, tensor<5x12x8xf32>) outs(%[[C]] : tensor<4x12xf32>)
+ res = linalg.batch_reduce_matmul(
+ A,
+ Btransposed,
+ outs=(C,),
+ indexing_maps=[a_map, b_transposed_map, c_map],
+ )
+
+ # CHECK: linalg.batch_reduce_matmul ins(%[[Amem]], %[[Bmem]] : memref<5x4x8xf32>, memref<5x8x12xf32>) outs(%[[Cmem]] : memref<4x12xf32>)
+ res = linalg.BatchReduceMatmulOp(
+ result_tensors=[],
+ inputs=(Amem, Bmem),
+ outputs=(Cmem,),
+ )
+ linalg.fill_builtin_region(res.operation)
+ # CHECK: linalg.batch_reduce_matmul ins(%[[Amem]], %[[Bmem]] : memref<5x4x8xf32>, memref<5x8x12xf32>) outs(%[[Cmem]] : memref<4x12xf32>)
+ linalg.batch_reduce_matmul(Amem, Bmem, outs=(Cmem,))
+
+ # CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$A_MAP]], #[[$BTrans_MAP]], #[[$C_MAP]]] ins(%[[Amem]], %[[BTransmem]] : memref<5x4x8xf32>, memref<5x12x8xf32>) outs(%[[Cmem]] : memref<4x12xf32>)
+ res = linalg.BatchReduceMatmulOp(
+ result_tensors=[],
+ inputs=(Amem, Btransposedmem),
+ outputs=(Cmem,),
+ indexing_maps=[a_map, b_transposed_map, c_map],
+ )
+ linalg.fill_builtin_region(res.operation)
+ # CHECK: linalg.batch_reduce_matmul indexing_maps = [#[[$A_MAP]], #[[$BTrans_MAP]], #[[$C_MAP]]] ins(%[[Amem]], %[[BTransmem]] : memref<5x4x8xf32>, memref<5x12x8xf32>) outs(%[[Cmem]] : memref<4x12xf32>)
+ linalg.batch_reduce_matmul(
+ Amem,
+ Btransposedmem,
+ outs=(Cmem,),
+ indexing_maps=[a_map, b_transposed_map, c_map],
+ )
+
+ print(module)
+
+
# CHECK-LABEL: TEST: testPackUnPackOp
@run
def testPackUnPackOp():
@@ -606,3 +707,189 @@ def testPackUnPackOp():
# CHECK: return %[[VAL_4]] : tensor<128x128xf32>
# CHECK: }
print(module)
+
+
+# CHECK-LABEL: TEST: testElementwiseOp
+@run
+def testElementwiseOp():
+ with Context(), Location.unknown():
+ module = Module.create()
+ f32 = F32Type.get()
+ with InsertionPoint(module.body):
+ rect_shape = (8, 16)
+ vert_line_shape = (8,)
+ hor_line_shape = (16,)
+ transposed_rect_shape = (16, 8)
+
+ # CHECK-DAG: #[[$IdentMap2D:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+ # CHECK-DAG: #[[$TransMap2D:.*]] = affine_map<(d0, d1) -> (d1, d0)>
+ # CHECK-DAG: #[[$VertLineBCastMap:.*]] = affine_map<(d0, d1) -> (d0)>
+ # CHECK-DAG: #[[$HorLineBCastMap:.*]] = affine_map<(d0, d1) -> (d1)>
+
+ ident_map_2d = AffineMap.get_identity(2)
+ transposed_map_2d = AffineMap.get_permutation((1, 0))
+ vert_line_bcast_map = AffineMap.get(2, 0, [AffineDimExpr.get(0)])
+ hor_line_bcast_map = AffineMap.get(2, 0, [AffineDimExpr.get(1)])
+
+ # CHECK: func.func @elementwise_op(
+ @func.FuncOp.from_py_func(
+ # CHECK-SAME: %[[Rect:.*]]: tensor<8x16xf32>,
+ RankedTensorType.get(rect_shape, f32),
+ # CHECK-SAME: %[[RectMem:.*]]: memref<8x16xf32>,
+ MemRefType.get(rect_shape, f32),
+ # CHECK-SAME: %[[VertLine:.*]]: tensor<8xf32>,
+ RankedTensorType.get(vert_line_shape, f32),
+ # CHECK-SAME: %[[VertLineMem:.*]]: memref<8xf32>,
+ MemRefType.get(vert_line_shape, f32),
+ # CHECK-SAME: %[[HorLine:.*]]: tensor<16xf32>,
+ RankedTensorType.get(hor_line_shape, f32),
+ # CHECK-SAME: %[[HorLineMem:.*]]: memref<16xf32>,
+ MemRefType.get(hor_line_shape, f32),
+ # CHECK-SAME: %[[TransRect:.*]]: tensor<16x8xf32>,
+ RankedTensorType.get(transposed_rect_shape, f32),
+ # CHECK-SAME: %[[TransRectMem:.*]]: memref<16x8xf32>)
+ MemRefType.get(transposed_rect_shape, f32),
+ )
+ def elementwise_op(
+ rect,
+ rect_mem,
+ vert_line,
+ vert_line_mem,
+ hor_line,
+ hor_line_mem,
+ trans_rect,
+ trans_rect_mem,
+ ):
+ # CHECK: %[[OutRect:.*]] = tensor.empty() : tensor<8x16xf32>
+ out_rect = tensor.EmptyOp(rect_shape, f32)
+ # CHECK: %[[OutRectMem:.*]] = memref.alloca() : memref<8x16xf32>
+ out_rect_mem = memref.alloca(MemRefType.get(rect_shape, f32), [], [])
+
+ if _inferred_affine_maps := True:
+ # CHECK: linalg.elementwise
+ # CHECK-SAME: kind=#linalg.elementwise_kind<exp>
+ # CHECK-SAME: ins(%[[Rect]] : tensor<8x16xf32>)
+ # CHECK-SAME: outs(%[[OutRect]] : tensor<8x16xf32>) -> tensor<8x16xf32>
+ op1 = linalg.ElementwiseOp(
+ result_tensors=(out_rect.result.type,),
+ inputs=(rect,),
+ outputs=(out_rect,),
+ kind=linalg.ElementwiseKind.exp,
+ )
+ linalg.fill_builtin_region(op1.operation)
+
+ # CHECK: linalg.elementwise
+ # CHECK-SAME: kind=#linalg.elementwise_kind<exp>
+ # CHECK-SAME: ins(%[[Rect]] : tensor<8x16xf32>)
+ # CHECK-SAME: outs(%[[OutRect]] : tensor<8x16xf32>) -> tensor<8x16xf32>
+ linalg.elementwise(
+ rect,
+ outs=(out_rect,),
+ kind=linalg.ElementwiseKind.exp,
+ )
+
+ # CHECK: linalg.elementwise
+ # CHECK-SAME: kind=#linalg.elementwise_kind<exp>
+ # CHECK-SAME: ins(%[[RectMem]] : memref<8x16xf32>)
+ # CHECK-SAME: outs(%[[OutRectMem]] : memref<8x16xf32>)
+ linalg.elementwise(
+ rect_mem,
+ outs=(out_rect_mem,),
+ kind=linalg.ElementwiseKind.exp,
+ )
+
+ if _explicit_ident_affine_maps := True:
+ # Same as above but with default identity indexing_maps explicitly provided.
+ # CHECK: linalg.elementwise
+ # CHECK-SAME: kind=#linalg.elementwise_kind<exp>
+ # CHECK-SAME: ins(%[[Rect]] : tensor<8x16xf32>)
+ # CHECK-SAME: outs(%[[OutRect]] : tensor<8x16xf32>) -> tensor<8x16xf32>
+ op3 = linalg.ElementwiseOp(
+ result_tensors=(out_rect.result.type,),
+ inputs=(rect,),
+ outputs=(out_rect,),
+ kind=linalg.ElementwiseKind.exp,
+ indexing_maps=[ident_map_2d, ident_map_2d],
+ )
+ linalg.fill_builtin_region(op3.operation)
+
+ # CHECK: linalg.elementwise
+ # CHECK-SAME: kind=#linalg.elementwise_kind<exp>
+ # CHECK-SAME: ins(%[[RectMem]] : memref<8x16xf32>)
+ # CHECK-SAME: outs(%[[OutRectMem]] : memref<8x16xf32>)
+ linalg.elementwise(
+ rect_mem,
+ outs=(out_rect_mem,),
+ kind=linalg.ElementwiseKind.exp,
+ indexing_maps=[ident_map_2d, ident_map_2d],
+ )
+
+ if _ops_with_non_ident_input_maps := True:
+ # CHECK: linalg.elementwise kind=#linalg.elementwise_kind<exp>
+ # CHECK-SAME: indexing_maps = [#[[$VertLineBCastMap]], #[[$IdentMap2D]]]
+ # CHECK-SAME: ins(%[[VertLine]] : tensor<8xf32>)
+ # CHECK-SAME: outs(%[[OutRect]] : tensor<8x16xf32>) -> tensor<8x16xf32>
+ op4 = linalg.ElementwiseOp(
+ result_tensors=(out_rect.result.type,),
+ inputs=(vert_line,),
+ outputs=(out_rect,),
+ kind=linalg.ElementwiseKind.exp,
+ indexing_maps=[vert_line_bcast_map, ident_map_2d],
+ )
+ linalg.fill_builtin_region(op4.operation)
+
+ # CHECK: linalg.elementwise kind=#linalg.elementwise_kind<add>
+ # CHECK-SAME: indexing_maps = [#[[$IdentMap2D]], #[[$VertLineBCastMap]], #[[$IdentMap2D]]]
+ # CHECK-SAME: ins(%[[Rect]], %[[VertLine]] : tensor<8x16xf32>, tensor<8xf32>)
+ # CHECK-SAME: outs(%[[OutRect]] : tensor<8x16xf32>) -> tensor<8x16xf32>
+ op4 = linalg.ElementwiseOp(
+ result_tensors=(out_rect.result.type,),
+ inputs=(rect, vert_line),
+ outputs=(out_rect,),
+ kind=linalg.ElementwiseKind.add,
+ indexing_maps=[ident_map_2d, vert_line_bcast_map, ident_map_2d],
+ )
+ linalg.fill_builtin_region(op4.operation)
+
+ # CHECK: linalg.elementwise kind=#linalg.elementwise_kind<div>
+ # CHECK-SAME: indexing_maps = [#[[$VertLineBCastMap]], #[[$HorLineBCastMap]], #[[$IdentMap2D]]]
+ # CHECK-SAME: ins(%[[VertLine]], %[[HorLine]] : tensor<8xf32>, tensor<16xf32>)
+ # CHECK-SAME: outs(%[[OutRect]] : tensor<8x16xf32>) -> tensor<8x16xf32>
+ linalg.elementwise(
+ vert_line,
+ hor_line,
+ outs=(out_rect,),
+ kind=linalg.ElementwiseKind.div,
+ indexing_maps=[
+ vert_line_bcast_map,
+ hor_line_bcast_map,
+ ident_map_2d,
+ ],
+ )
+
+ if _ops_with_non_ident_and_transposed_input_maps := True:
+ # CHECK: %[[VertLineBoolsMem:.*]] = memref.alloca() : memref<8xi1>
+ vert_line_bools_mem = memref.alloca(
+ MemRefType.get(vert_line_shape, IntegerType.get_signless(1)),
+ [],
+ [],
+ )
+ # CHECK: linalg.elementwise kind=#linalg.elementwise_kind<select>
+ # CHECK-SAME: indexing_maps = [#[[$VertLineBCastMap]], #[[$HorLineBCastMap]], #[[$TransMap2D]], #[[$IdentMap2D]]]
+ # CHECK-SAME: ins(%[[VertLineBoolsMem]], %[[HorLineMem]], %[[TransRectMem]] : memref<8xi1>, memref<16xf32>, memref<16x8xf32>)
+ # CHECK-SAME: outs(%[[OutRectMem]] : memref<8x16xf32>)
+ linalg.elementwise(
+ vert_line_bools_mem,
+ hor_line_mem,
+ trans_rect_mem,
+ outs=(out_rect_mem,),
+ kind=linalg.ElementwiseKind.select,
+ indexing_maps=[
+ vert_line_bcast_map,
+ hor_line_bcast_map,
+ transposed_map_2d,
+ ident_map_2d,
+ ],
+ )
+
+ print(module)
diff --git a/mlir/tools/mlir-opt/CMakeLists.txt b/mlir/tools/mlir-opt/CMakeLists.txt
index a5a4429..3220dca 100644
--- a/mlir/tools/mlir-opt/CMakeLists.txt
+++ b/mlir/tools/mlir-opt/CMakeLists.txt
@@ -46,6 +46,7 @@ if(MLIR_INCLUDE_TESTS)
MLIRTilingInterfaceTestPasses
MLIRTosaTestPasses
MLIRVectorTestPasses
+ MLIRXeGPUTestPasses
MLIRTestVectorToSPIRV
MLIRLLVMTestPasses
)
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index 344576a..cdcf59b 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -158,6 +158,7 @@ void registerTestVectorLowerings();
void registerTestVectorReductionToSPIRVDotProd();
void registerTestVulkanRunnerPipeline();
void registerTestWrittenToPass();
+void registerTestXeGPULowerings();
#if MLIR_ENABLE_PDL_IN_PATTERNMATCH
void registerTestDialectConversionPasses();
void registerTestPDLByteCodePass();
@@ -301,6 +302,7 @@ void registerTestPasses() {
mlir::test::registerTestVectorReductionToSPIRVDotProd();
mlir::test::registerTestVulkanRunnerPipeline();
mlir::test::registerTestWrittenToPass();
+ mlir::test::registerTestXeGPULowerings();
#if MLIR_ENABLE_PDL_IN_PATTERNMATCH
mlir::test::registerTestDialectConversionPasses();
mlir::test::registerTestPDLByteCodePass();
diff --git a/offload/test/offloading/fortran/target-defaultmap-present.f90 b/offload/test/offloading/fortran/target-defaultmap-present.f90
new file mode 100644
index 0000000..3342db2
--- /dev/null
+++ b/offload/test/offloading/fortran/target-defaultmap-present.f90
@@ -0,0 +1,34 @@
+! This checks that the basic functionality of setting the implicit mapping
+! behaviour of a target region to present incurs the present behaviour for
+! the implicit map capture.
+! REQUIRES: flang, amdgpu
+! RUN: %libomptarget-compile-fortran-generic
+! RUN: %libomptarget-run-fail-generic 2>&1 \
+! RUN: | %fcheck-generic
+
+! NOTE: This should intentionally fatal error in omptarget as it's not
+! present, as is intended.
+subroutine target_data_not_present()
+ implicit none
+ double precision, dimension(:), allocatable :: arr
+ integer, parameter :: N = 16
+ integer :: i
+
+ allocate(arr(N))
+
+!$omp target defaultmap(present: allocatable)
+ do i = 1,N
+ arr(i) = 42.0d0
+ end do
+!$omp end target
+
+ deallocate(arr)
+ return
+end subroutine
+
+program map_present
+ implicit none
+ call target_data_not_present()
+end program
+
+!CHECK: omptarget message: device mapping required by 'present' map type modifier does not exist for host address{{.*}}
diff --git a/offload/test/offloading/fortran/target-defaultmap.f90 b/offload/test/offloading/fortran/target-defaultmap.f90
new file mode 100644
index 0000000..d718437
--- /dev/null
+++ b/offload/test/offloading/fortran/target-defaultmap.f90
@@ -0,0 +1,166 @@
+! Offloading test checking the use of the depend clause on the target construct
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-unknown-linux-gnu
+! UNSUPPORTED: x86_64-unknown-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+subroutine defaultmap_allocatable_present()
+ implicit none
+ integer, dimension(:), allocatable :: arr
+ integer :: N = 16
+ integer :: i
+
+ allocate(arr(N))
+
+!$omp target enter data map(to: arr)
+
+!$omp target defaultmap(present: allocatable)
+ do i = 1,N
+ arr(i) = N + 40
+ end do
+!$omp end target
+
+!$omp target exit data map(from: arr)
+
+ print *, arr
+ deallocate(arr)
+
+ return
+end subroutine
+
+subroutine defaultmap_scalar_tofrom()
+ implicit none
+ integer :: scalar_int
+ scalar_int = 10
+
+ !$omp target defaultmap(tofrom: scalar)
+ scalar_int = 20
+ !$omp end target
+
+ print *, scalar_int
+ return
+end subroutine
+
+subroutine defaultmap_all_default()
+ implicit none
+ integer, dimension(:), allocatable :: arr
+ integer :: aggregate(16)
+ integer :: N = 16
+ integer :: i, scalar_int
+
+ allocate(arr(N))
+
+ scalar_int = 10
+ aggregate = scalar_int
+
+ !$omp target defaultmap(default: all)
+ scalar_int = 20
+ do i = 1,N
+ arr(i) = scalar_int + aggregate(i)
+ end do
+ !$omp end target
+
+ print *, scalar_int
+ print *, arr
+
+ deallocate(arr)
+ return
+end subroutine
+
+subroutine defaultmap_pointer_to()
+ implicit none
+ integer, dimension(:), pointer :: arr_ptr(:)
+ integer :: scalar_int, i
+ allocate(arr_ptr(10))
+ arr_ptr = 10
+ scalar_int = 20
+
+ !$omp target defaultmap(to: pointer)
+ do i = 1,10
+ arr_ptr(i) = scalar_int + 20
+ end do
+ !$omp end target
+
+ print *, arr_ptr
+ deallocate(arr_ptr)
+ return
+end subroutine
+
+subroutine defaultmap_scalar_from()
+ implicit none
+ integer :: scalar_test
+ scalar_test = 10
+ !$omp target defaultmap(from: scalar)
+ scalar_test = 20
+ !$omp end target
+
+ print *, scalar_test
+ return
+end subroutine
+
+subroutine defaultmap_aggregate_to()
+ implicit none
+ integer :: aggregate_arr(16)
+ integer :: i, scalar_test = 0
+ aggregate_arr = 0
+ !$omp target map(tofrom: scalar_test) defaultmap(to: aggregate)
+ do i = 1,16
+ aggregate_arr(i) = i
+ scalar_test = scalar_test + aggregate_arr(i)
+ enddo
+ !$omp end target
+
+ print *, scalar_test
+ print *, aggregate_arr
+ return
+end subroutine
+
+subroutine defaultmap_dtype_aggregate_to()
+ implicit none
+ type :: dtype
+ real(4) :: i
+ real(4) :: j
+ integer(4) :: array_i(10)
+ integer(4) :: k
+ integer(4) :: array_j(10)
+ end type dtype
+
+ type(dtype) :: aggregate_type
+
+ aggregate_type%k = 20
+ aggregate_type%array_i = 30
+
+ !$omp target defaultmap(to: aggregate)
+ aggregate_type%k = 40
+ aggregate_type%array_i(1) = 50
+ !$omp end target
+
+ print *, aggregate_type%k
+ print *, aggregate_type%array_i(1)
+ return
+end subroutine
+
+program map_present
+ implicit none
+! CHECK: 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56
+ call defaultmap_allocatable_present()
+! CHECK: 20
+ call defaultmap_scalar_tofrom()
+! CHECK: 10
+! CHECK: 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30
+ call defaultmap_all_default()
+! CHECK: 10 10 10 10 10 10 10 10 10 10
+ call defaultmap_pointer_to()
+! CHECK: 20
+ call defaultmap_scalar_from()
+! CHECK: 136
+! CHECK: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ call defaultmap_aggregate_to()
+! CHECK: 20
+! CHECK: 30
+ call defaultmap_dtype_aggregate_to()
+end program
diff --git a/polly/lib/Analysis/DependenceInfo.cpp b/polly/lib/Analysis/DependenceInfo.cpp
index a530fa7..c620f40 100644
--- a/polly/lib/Analysis/DependenceInfo.cpp
+++ b/polly/lib/Analysis/DependenceInfo.cpp
@@ -992,7 +992,7 @@ DependenceInfoWrapperPass::getDependences(Scop *S,
if (It != ScopToDepsMap.end())
if (It->second) {
if (It->second->getDependenceLevel() == Level)
- return *It->second.get();
+ return *It->second;
}
return recomputeDependences(S, Level);
}
diff --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp
index 260211b..43ed863 100644
--- a/polly/lib/Analysis/ScopDetection.cpp
+++ b/polly/lib/Analysis/ScopDetection.cpp
@@ -366,7 +366,7 @@ void ScopDetection::detect(Function &F) {
// Prune non-profitable regions.
for (auto &DIt : DetectionContextMap) {
- DetectionContext &DC = *DIt.getSecond().get();
+ DetectionContext &DC = *DIt.getSecond();
if (DC.Log.hasErrors())
continue;
if (!ValidRegions.count(&DC.CurRegion))
@@ -431,7 +431,7 @@ bool ScopDetection::isMaxRegionInScop(const Region &R, bool Verify) {
Entry = std::make_unique<DetectionContext>(const_cast<Region &>(R), AA,
/*Verifying=*/false);
- return isValidRegion(*Entry.get());
+ return isValidRegion(*Entry);
}
return true;
@@ -1496,7 +1496,7 @@ Region *ScopDetection::expandRegion(Region &R) {
std::unique_ptr<DetectionContext> &Entry = DetectionContextMap[P];
Entry = std::make_unique<DetectionContext>(*ExpandedRegion, AA,
/*Verifying=*/false);
- DetectionContext &Context = *Entry.get();
+ DetectionContext &Context = *Entry;
POLLY_DEBUG(dbgs() << "\t\tTrying " << ExpandedRegion->getNameStr()
<< "\n");
@@ -1554,7 +1554,7 @@ static bool regionWithoutLoops(Region &R, LoopInfo &LI) {
void ScopDetection::removeCachedResultsRecursively(const Region &R) {
for (auto &SubRegion : R) {
if (ValidRegions.count(SubRegion.get())) {
- removeCachedResults(*SubRegion.get());
+ removeCachedResults(*SubRegion);
} else
removeCachedResultsRecursively(*SubRegion);
}
@@ -1568,7 +1568,7 @@ void ScopDetection::findScops(Region &R) {
std::unique_ptr<DetectionContext> &Entry =
DetectionContextMap[getBBPairForRegion(&R)];
Entry = std::make_unique<DetectionContext>(R, AA, /*Verifying=*/false);
- DetectionContext &Context = *Entry.get();
+ DetectionContext &Context = *Entry;
bool DidBailout = true;
if (!PollyProcessUnprofitable && regionWithoutLoops(R, LI))
@@ -1834,7 +1834,7 @@ void ScopDetection::printLocations(Function &F) {
void ScopDetection::emitMissedRemarks(const Function &F) {
for (auto &DIt : DetectionContextMap) {
- DetectionContext &DC = *DIt.getSecond().get();
+ DetectionContext &DC = *DIt.getSecond();
if (DC.Log.hasErrors())
emitRejectionRemarks(DIt.getFirst(), DC.Log, ORE);
}
diff --git a/utils/bazel/llvm-project-overlay/lldb/BUILD.bazel b/utils/bazel/llvm-project-overlay/lldb/BUILD.bazel
index d872711..6ec7cb5 100644
--- a/utils/bazel/llvm-project-overlay/lldb/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/lldb/BUILD.bazel
@@ -1022,7 +1022,7 @@ expand_template(
substitutions = {
"${LLDB_VERSION}": PACKAGE_VERSION,
},
- template = "tools/lldb-dap/lldb-dap-Info.plist.in",
+ template = "tools/lldb-dap/tool/lldb-dap-Info.plist.in",
)
gentbl_cc_library(
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index fa2b6a0..ad00f2b 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -608,6 +608,7 @@ cc_library(
":DebugInfo",
":DebugInfoBTF",
":DebugInfoDWARF",
+ ":DebugInfoGSYM",
":DebugInfoPDB",
":Demangle",
":Object",
@@ -2170,6 +2171,7 @@ llvm_target_lib_list = [lib for lib in [
"lib/Target/AVR/AVRGenMCCodeEmitter.inc": ["-gen-emitter"],
"lib/Target/AVR/AVRGenInstrInfo.inc": ["-gen-instr-info"],
"lib/Target/AVR/AVRGenRegisterInfo.inc": ["-gen-register-info"],
+ "lib/Target/AVR/AVRGenSDNodeInfo.inc": ["-gen-sd-node-info"],
"lib/Target/AVR/AVRGenSubtargetInfo.inc": ["-gen-subtarget"],
},
},
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index da7b783..f73c5cf5 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -3419,6 +3419,7 @@ cc_library(
deps = [
":AffineUtils",
":Analysis",
+ ":DialectUtils",
":FunctionInterfaces",
":GPUDialect",
":GPUUtils",
@@ -8988,6 +8989,7 @@ cc_binary(
"//mlir/test:TestTransforms",
"//mlir/test:TestVector",
"//mlir/test:TestVectorToSPIRV",
+ "//mlir/test:TestXeGPU",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 95fb5fb..0ffa8ed 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -1159,3 +1159,19 @@ cc_library(
"//mlir:Transforms",
],
)
+
+cc_library(
+ name = "TestXeGPU",
+ srcs = glob(["lib/Dialect/XeGPU/*.cpp"]),
+ includes = ["lib/Dialect/Test"],
+ deps = [
+ "//mlir:GPUDialect",
+ "//mlir:IR",
+ "//mlir:MemRefDialect",
+ "//mlir:Pass",
+ "//mlir:TransformUtils",
+ "//mlir:VectorTransforms",
+ "//mlir:XeGPUDialect",
+ "//mlir:XeGPUTransforms",
+ ],
+)